HDDS-175. Refactor ContainerInfo to remove Pipeline object from it.
Contributed by Ajay Kumar.
parent 93ac01cb59
commit 7ca4f0cefa
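The client-visible shape of this refactor: SCM allocation and lookup calls now hand back a ContainerWithPipeline pair instead of a ContainerInfo that embeds its Pipeline. Below is a minimal call-site sketch using only names that appear in the diff; the wrapper class and helper method themselves are illustrative and not part of the commit.

import java.io.IOException;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

/** Illustrative only: shows the post-HDDS-175 call shape. */
final class AllocateExample {
  static XceiverClientSpi allocateAndAcquire(
      StorageContainerLocationProtocol locationClient,
      XceiverClientManager clientManager, String owner) throws IOException {
    // allocateContainer(...) now returns the (ContainerInfo, Pipeline) pair.
    ContainerWithPipeline cwp = locationClient.allocateContainer(
        clientManager.getType(), clientManager.getFactor(), owner);
    // The Pipeline is no longer reachable via ContainerInfo.getPipeline();
    // ContainerInfo keeps only a pipeline name plus replication factor/type.
    Pipeline pipeline = cwp.getPipeline();
    long containerID = cwp.getContainerInfo().getContainerID();
    return clientManager.acquireClient(pipeline, containerID);
  }
}

Callers that hold only a container ID can recover the pipeline through the new getContainerWithPipeline(containerID) RPC, which is what the new deleteContainer/closeContainer/readContainer overloads in the diff do.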
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -87,16 +88,17 @@ public static void setContainerSizeB(long size) {
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(String owner)
+  public ContainerWithPipeline createContainer(String owner)
       throws IOException {
     XceiverClientSpi client = null;
     try {
-      ContainerInfo container =
+      ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.allocateContainer(
               xceiverClientManager.getType(),
               xceiverClientManager.getFactor(), owner);
-      Pipeline pipeline = container.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline, container.getContainerID());
+      Pipeline pipeline = containerWithPipeline.getPipeline();
+      client = xceiverClientManager.acquireClient(pipeline,
+          containerWithPipeline.getContainerInfo().getContainerID());

       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -106,8 +108,9 @@ public ContainerInfo createContainer(String owner)
       if (pipeline.getLifeCycleState() == ALLOCATED) {
         createPipeline(client, pipeline);
       }
-      createContainer(client, container.getContainerID());
-      return container;
+      createContainer(client,
+          containerWithPipeline.getContainerInfo().getContainerID());
+      return containerWithPipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -197,17 +200,17 @@ private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(HddsProtos.ReplicationType type,
+  public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor factor, String owner) throws IOException {
     XceiverClientSpi client = null;
     try {
       // allocate container on SCM.
-      ContainerInfo container =
+      ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.allocateContainer(type, factor,
               owner);
-      Pipeline pipeline = container.getPipeline();
+      Pipeline pipeline = containerWithPipeline.getPipeline();
       client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
+          containerWithPipeline.getContainerInfo().getContainerID());

       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -217,9 +220,10 @@ public ContainerInfo createContainer(HddsProtos.ReplicationType type,
       }
       // connect to pipeline leader and allocate container on leader datanode.
       client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
-      createContainer(client, container.getContainerID());
-      return container;
+          containerWithPipeline.getContainerInfo().getContainerID());
+      createContainer(client,
+          containerWithPipeline.getContainerInfo().getContainerID());
+      return containerWithPipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -256,24 +260,27 @@ public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
   }

   /**
-   * Delete the container, this will release any resource it uses.
-   * @param pipeline - Pipeline that represents the container.
-   * @param force - True to forcibly delete the container.
+   * Deletes an existing container.
+   *
+   * @param containerId - ID of the container.
+   * @param pipeline - Pipeline that represents the container.
+   * @param force - true to forcibly delete the container.
    * @throws IOException
    */
   @Override
-  public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
-      throws IOException {
+  public void deleteContainer(long containerId, Pipeline pipeline,
+      boolean force) throws IOException {
     XceiverClientSpi client = null;
     try {
-      client = xceiverClientManager.acquireClient(pipeline, containerID);
+      client = xceiverClientManager.acquireClient(pipeline, containerId);
       String traceID = UUID.randomUUID().toString();
-      ContainerProtocolCalls.deleteContainer(client, containerID, force, traceID);
+      ContainerProtocolCalls
+          .deleteContainer(client, containerId, force, traceID);
       storageContainerLocationClient
-          .deleteContainer(containerID);
+          .deleteContainer(containerId);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Deleted container {}, leader: {}, machines: {} ",
-            containerID,
+            containerId,
             pipeline.getLeader(),
             pipeline.getMachines());
       }
@@ -284,6 +291,19 @@ public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
       }
     }

+  /**
+   * Delete the container, this will release any resource it uses.
+   * @param containerID - containerID.
+   * @param force - True to forcibly delete the container.
+   * @throws IOException
+   */
+  @Override
+  public void deleteContainer(long containerID, boolean force)
+      throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerID);
+    deleteContainer(containerID, info.getPipeline(), force);
+  }
+
   /**
    * {@inheritDoc}
    */
@@ -297,9 +317,9 @@ public List<ContainerInfo> listContainer(long startContainerID,
   /**
    * Get meta data from an existing container.
    *
-   * @param pipeline - pipeline that represents the container.
-   * @return ContainerInfo - a message of protobuf which has basic info
-   * of a container.
+   * @param containerID - ID of the container.
+   * @param pipeline - Pipeline where the container is located.
+   * @return ContainerInfo
    * @throws IOException
    */
   @Override
@@ -325,6 +345,19 @@ public ContainerData readContainer(long containerID,
       }
     }

+  /**
+   * Get meta data from an existing container.
+   * @param containerID - ID of the container.
+   * @return ContainerInfo - a message of protobuf which has basic info
+   * of a container.
+   * @throws IOException
+   */
+  @Override
+  public ContainerData readContainer(long containerID) throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerID);
+    return readContainer(containerID, info.getPipeline());
+  }
+
   /**
    * Given an id, return the pipeline associated with the container.
    * @param containerId - String Container ID
@@ -337,6 +370,19 @@ public ContainerInfo getContainer(long containerId) throws
     return storageContainerLocationClient.getContainer(containerId);
   }

+  /**
+   * Gets a container by Name -- Throws if the container does not exist.
+   *
+   * @param containerId - Container ID
+   * @return ContainerWithPipeline
+   * @throws IOException
+   */
+  @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerId)
+      throws IOException {
+    return storageContainerLocationClient.getContainerWithPipeline(containerId);
+  }
+
   /**
    * Close a container.
    *
@@ -391,6 +437,19 @@ public void closeContainer(long containerId, Pipeline pipeline)
       }
     }

+  /**
+   * Close a container.
+   *
+   * @throws IOException
+   */
+  @Override
+  public void closeContainer(long containerId)
+      throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerId);
+    Pipeline pipeline = info.getPipeline();
+    closeContainer(containerId, pipeline);
+  }
+
   /**
    * Get the the current usage information.
    * @param containerID - ID of the container.
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.client;

 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -45,7 +46,7 @@ public interface ScmClient {
    * @return ContainerInfo
    * @throws IOException
    */
-  ContainerInfo createContainer(String owner) throws IOException;
+  ContainerWithPipeline createContainer(String owner) throws IOException;

   /**
    * Gets a container by Name -- Throws if the container does not exist.
@@ -55,6 +56,14 @@ public interface ScmClient {
    */
   ContainerInfo getContainer(long containerId) throws IOException;

+  /**
+   * Gets a container by Name -- Throws if the container does not exist.
+   * @param containerId - Container ID
+   * @return ContainerWithPipeline
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerId) throws IOException;
+
   /**
    * Close a container.
    *
@@ -64,6 +73,14 @@ public interface ScmClient {
    */
   void closeContainer(long containerId, Pipeline pipeline) throws IOException;

+  /**
+   * Close a container.
+   *
+   * @param containerId - ID of the container.
+   * @throws IOException
+   */
+  void closeContainer(long containerId) throws IOException;
+
   /**
    * Deletes an existing container.
    * @param containerId - ID of the container.
@@ -73,6 +90,14 @@ public interface ScmClient {
    */
   void deleteContainer(long containerId, Pipeline pipeline, boolean force) throws IOException;

+  /**
+   * Deletes an existing container.
+   * @param containerId - ID of the container.
+   * @param force - true to forcibly delete the container.
+   * @throws IOException
+   */
+  void deleteContainer(long containerId, boolean force) throws IOException;
+
   /**
    * Lists a range of containers and get their info.
    *
@@ -95,6 +120,15 @@ List<ContainerInfo> listContainer(long startContainerID,
   ContainerData readContainer(long containerID, Pipeline pipeline)
       throws IOException;

+  /**
+   * Read meta data from an existing container.
+   * @param containerID - ID of the container.
+   * @return ContainerInfo
+   * @throws IOException
+   */
+  ContainerData readContainer(long containerID)
+      throws IOException;
+
   /**
    * Gets the container size -- Computed by SCM from Container Reports.
    * @param containerID - ID of the container.
@@ -110,7 +144,7 @@ ContainerData readContainer(long containerID, Pipeline pipeline)
    * @return ContainerInfo
    * @throws IOException - in case of error.
    */
-  ContainerInfo createContainer(HddsProtos.ReplicationType type,
+  ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor replicationFactor,
       String owner) throws IOException;

@@ -15,34 +15,39 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */

 package org.apache.hadoop.hdds.scm.container.common.helpers;

+import static java.lang.Math.max;
+
 import com.fasterxml.jackson.annotation.JsonAutoDetect;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.PropertyAccessor;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.util.Time;
+import java.io.Externalizable;

 import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import java.util.Arrays;
 import java.util.Comparator;
-
-import static java.lang.Math.max;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.util.Time;

 /**
  * Class wraps ozone container info.
  */
-public class ContainerInfo
-    implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
+public class ContainerInfo implements Comparator<ContainerInfo>,
+    Comparable<ContainerInfo>, Externalizable {

   private static final ObjectWriter WRITER;
+  private static final String SERIALIZATION_ERROR_MSG = "Java serialization not"
+      + " supported. Use protobuf instead.";

   static {
     ObjectMapper mapper = new ObjectMapper();
@@ -53,7 +58,9 @@ public class ContainerInfo
   }

   private HddsProtos.LifeCycleState state;
-  private Pipeline pipeline;
+  private String pipelineName;
+  private ReplicationFactor replicationFactor;
+  private ReplicationType replicationType;
   // Bytes allocated by SCM for clients.
   private long allocatedBytes;
   // Actual container usage, updated through heartbeat.
@@ -75,15 +82,17 @@ public class ContainerInfo
   ContainerInfo(
       long containerID,
       HddsProtos.LifeCycleState state,
-      Pipeline pipeline,
+      String pipelineName,
       long allocatedBytes,
       long usedBytes,
       long numberOfKeys,
       long stateEnterTime,
       String owner,
-      long deleteTransactionId) {
+      long deleteTransactionId,
+      ReplicationFactor replicationFactor,
+      ReplicationType repType) {
     this.containerID = containerID;
-    this.pipeline = pipeline;
+    this.pipelineName = pipelineName;
     this.allocatedBytes = allocatedBytes;
     this.usedBytes = usedBytes;
     this.numberOfKeys = numberOfKeys;
@@ -92,6 +101,8 @@ public class ContainerInfo
     this.stateEnterTime = stateEnterTime;
     this.owner = owner;
     this.deleteTransactionId = deleteTransactionId;
+    this.replicationFactor = replicationFactor;
+    this.replicationType = repType;
   }

   /**
@@ -102,16 +113,18 @@ public ContainerInfo() {

   public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline()));
-    builder.setAllocatedBytes(info.getAllocatedBytes());
-    builder.setUsedBytes(info.getUsedBytes());
-    builder.setNumberOfKeys(info.getNumberOfKeys());
-    builder.setState(info.getState());
-    builder.setStateEnterTime(info.getStateEnterTime());
-    builder.setOwner(info.getOwner());
-    builder.setContainerID(info.getContainerID());
-    builder.setDeleteTransactionId(info.getDeleteTransactionId());
-    return builder.build();
+    return builder.setPipelineName(info.getPipelineName())
+        .setAllocatedBytes(info.getAllocatedBytes())
+        .setUsedBytes(info.getUsedBytes())
+        .setNumberOfKeys(info.getNumberOfKeys())
+        .setState(info.getState())
+        .setStateEnterTime(info.getStateEnterTime())
+        .setOwner(info.getOwner())
+        .setContainerID(info.getContainerID())
+        .setDeleteTransactionId(info.getDeleteTransactionId())
+        .setReplicationFactor(info.getReplicationFactor())
+        .setReplicationType(info.getReplicationType())
+        .build();
   }

   public long getContainerID() {
@@ -130,8 +143,12 @@ public long getStateEnterTime() {
     return stateEnterTime;
   }

-  public Pipeline getPipeline() {
-    return pipeline;
+  public ReplicationFactor getReplicationFactor() {
+    return replicationFactor;
+  }
+
+  public String getPipelineName() {
+    return pipelineName;
   }

   public long getAllocatedBytes() {
@@ -177,6 +194,10 @@ public long getLastUsed() {
     return lastUsed;
   }

+  public ReplicationType getReplicationType() {
+    return replicationType;
+  }
+
   public void updateLastUsedTime() {
     lastUsed = Time.monotonicNow();
   }
@@ -190,19 +211,17 @@ public void allocate(long size) {
   public HddsProtos.SCMContainerInfo getProtobuf() {
     HddsProtos.SCMContainerInfo.Builder builder =
         HddsProtos.SCMContainerInfo.newBuilder();
-    builder.setPipeline(getPipeline().getProtobufMessage());
-    builder.setAllocatedBytes(getAllocatedBytes());
-    builder.setUsedBytes(getUsedBytes());
-    builder.setNumberOfKeys(getNumberOfKeys());
-    builder.setState(state);
-    builder.setStateEnterTime(stateEnterTime);
-    builder.setContainerID(getContainerID());
-    builder.setDeleteTransactionId(deleteTransactionId);
-    if (getOwner() != null) {
-      builder.setOwner(getOwner());
-    }
-    return builder.build();
+    return builder.setAllocatedBytes(getAllocatedBytes())
+        .setContainerID(getContainerID())
+        .setUsedBytes(getUsedBytes())
+        .setNumberOfKeys(getNumberOfKeys()).setState(getState())
+        .setStateEnterTime(getStateEnterTime()).setContainerID(getContainerID())
+        .setDeleteTransactionId(getDeleteTransactionId())
+        .setPipelineName(getPipelineName())
+        .setReplicationFactor(getReplicationFactor())
+        .setReplicationType(getReplicationType())
+        .setOwner(getOwner())
+        .build();
   }

   public String getOwner() {
@@ -217,7 +236,7 @@ public void setOwner(String owner) {
   public String toString() {
     return "ContainerInfo{"
         + "state=" + state
-        + ", pipeline=" + pipeline
+        + ", pipelineName=" + pipelineName
         + ", stateEnterTime=" + stateEnterTime
         + ", owner=" + owner
         + '}';
@@ -252,9 +271,7 @@ public boolean equals(Object o) {
   public int hashCode() {
     return new HashCodeBuilder(11, 811)
         .append(getContainerID())
-        .append(pipeline.getFactor())
-        .append(pipeline.getType())
-        .append(owner)
+        .append(getOwner())
         .toHashCode();
   }

@@ -327,12 +344,44 @@ public void setData(byte[] data) {
       this.data = Arrays.copyOf(data, data.length);
     }
   }

+  /**
+   * Throws IOException as default java serialization is not supported. Use
+   * serialization via protobuf instead.
+   *
+   * @param out the stream to write the object to
+   * @throws IOException Includes any I/O exceptions that may occur
+   * @serialData Overriding methods should use this tag to describe
+   *             the data layout of this Externalizable object.
+   *             List the sequence of element types and, if possible,
+   *             relate the element to a public/protected field and/or
+   *             method of this Externalizable class.
+   */
+  @Override
+  public void writeExternal(ObjectOutput out) throws IOException {
+    throw new IOException(SERIALIZATION_ERROR_MSG);
+  }
+
+  /**
+   * Throws IOException as default java serialization is not supported. Use
+   * serialization via protobuf instead.
+   *
+   * @param in the stream to read data from in order to restore the object
+   * @throws IOException if I/O errors occur
+   * @throws ClassNotFoundException If the class for an object being
+   *                                restored cannot be found.
+   */
+  @Override
+  public void readExternal(ObjectInput in)
+      throws IOException, ClassNotFoundException {
+    throw new IOException(SERIALIZATION_ERROR_MSG);
+  }
+
   /**
    * Builder class for ContainerInfo.
    */
   public static class Builder {
     private HddsProtos.LifeCycleState state;
-    private Pipeline pipeline;
     private long allocated;
     private long used;
     private long keys;
@@ -340,6 +389,25 @@ public static class Builder {
     private String owner;
     private long containerID;
     private long deleteTransactionId;
+    private String pipelineName;
+    private ReplicationFactor replicationFactor;
+    private ReplicationType replicationType;
+
+    public Builder setReplicationType(
+        ReplicationType replicationType) {
+      this.replicationType = replicationType;
+      return this;
+    }
+
+    public Builder setPipelineName(String pipelineName) {
+      this.pipelineName = pipelineName;
+      return this;
+    }
+
+    public Builder setReplicationFactor(ReplicationFactor repFactor) {
+      this.replicationFactor = repFactor;
+      return this;
+    }
+
     public Builder setContainerID(long id) {
       Preconditions.checkState(id >= 0);
@@ -352,11 +420,6 @@ public Builder setState(HddsProtos.LifeCycleState lifeCycleState) {
       return this;
     }

-    public Builder setPipeline(Pipeline containerPipeline) {
-      this.pipeline = containerPipeline;
-      return this;
-    }
-
     public Builder setAllocatedBytes(long bytesAllocated) {
       this.allocated = bytesAllocated;
       return this;
@@ -388,9 +451,9 @@ public Builder setDeleteTransactionId(long deleteTransactionId) {
     }

     public ContainerInfo build() {
-      return new
-          ContainerInfo(containerID, state, pipeline, allocated,
-              used, keys, stateEnterTime, owner, deleteTransactionId);
+      return new ContainerInfo(containerID, state, pipelineName, allocated,
+          used, keys, stateEnterTime, owner, deleteTransactionId,
+          replicationFactor, replicationType);
     }
   }
 }
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import java.util.Comparator;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+/**
+ * Class wraps ozone container info.
+ */
+public class ContainerWithPipeline
+    implements Comparator<ContainerWithPipeline>, Comparable<ContainerWithPipeline> {
+
+  private final ContainerInfo containerInfo;
+  private final Pipeline pipeline;
+
+  public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) {
+    this.containerInfo = containerInfo;
+    this.pipeline = pipeline;
+  }
+
+  public ContainerInfo getContainerInfo() {
+    return containerInfo;
+  }
+
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  public static ContainerWithPipeline fromProtobuf(HddsProtos.ContainerWithPipeline allocatedContainer) {
+    return new ContainerWithPipeline(
+        ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
+        Pipeline.getFromProtoBuf(allocatedContainer.getPipeline()));
+  }
+
+  public HddsProtos.ContainerWithPipeline getProtobuf() {
+    HddsProtos.ContainerWithPipeline.Builder builder =
+        HddsProtos.ContainerWithPipeline.newBuilder();
+    builder.setContainerInfo(getContainerInfo().getProtobuf())
+        .setPipeline(getPipeline().getProtobufMessage());
+
+    return builder.build();
+  }
+
+
+  @Override
+  public String toString() {
+    return containerInfo.toString() + " | " + pipeline.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ContainerWithPipeline that = (ContainerWithPipeline) o;
+
+    return new EqualsBuilder()
+        .append(getContainerInfo(), that.getContainerInfo())
+        .append(getPipeline(), that.getPipeline())
+        .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(11, 811)
+        .append(getContainerInfo())
+        .append(getPipeline())
+        .toHashCode();
+  }
+
+  /**
+   * Compares its two arguments for order.  Returns a negative integer, zero, or
+   * a positive integer as the first argument is less than, equal to, or greater
+   * than the second.<p>
+   *
+   * @param o1 the first object to be compared.
+   * @param o2 the second object to be compared.
+   * @return a negative integer, zero, or a positive integer as the first
+   * argument is less than, equal to, or greater than the second.
+   * @throws NullPointerException if an argument is null and this comparator
+   *                              does not permit null arguments
+   * @throws ClassCastException   if the arguments' types prevent them from
+   *                              being compared by this comparator.
+   */
+  @Override
+  public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) {
+    return o1.getContainerInfo().compareTo(o2.getContainerInfo());
+  }
+
+  /**
+   * Compares this object with the specified object for order.  Returns a
+   * negative integer, zero, or a positive integer as this object is less than,
+   * equal to, or greater than the specified object.
+   *
+   * @param o the object to be compared.
+   * @return a negative integer, zero, or a positive integer as this object is
+   * less than, equal to, or greater than the specified object.
+   * @throws NullPointerException if the specified object is null
+   * @throws ClassCastException   if the specified object's type prevents it
+   *                              from being compared to this object.
+   */
+  @Override
+  public int compareTo(ContainerWithPipeline o) {
+    return this.compare(this, o);
+  }
+
+}
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.protocol;

 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -38,7 +39,7 @@ public interface StorageContainerLocationProtocol {
    * set of datanodes that should be used creating this container.
    *
    */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
+  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType replicationType,
       HddsProtos.ReplicationFactor factor, String owner)
       throws IOException;

@@ -53,6 +54,16 @@ ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
    */
   ContainerInfo getContainer(long containerID) throws IOException;

+  /**
+   * Ask SCM the location of the container. SCM responds with a group of
+   * nodes where this container and its replicas are located.
+   *
+   * @param containerID - ID of the container.
+   * @return ContainerWithPipeline - the container info with the pipeline.
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException;
+
   /**
    * Ask SCM a list of containers with a range of container names
    * and the limit of count.
@@ -20,7 +20,10 @@
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -95,7 +98,7 @@ public StorageContainerLocationProtocolClientSideTranslatorPB(
    * @throws IOException
    */
   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
+  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor factor, String owner) throws IOException {

     ContainerRequestProto request = ContainerRequestProto.newBuilder()
@@ -114,7 +117,7 @@ public ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
       throw new IOException(response.hasErrorMessage() ?
           response.getErrorMessage() : "Allocate container failed.");
     }
-    return ContainerInfo.fromProtobuf(response.getContainerInfo());
+    return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
   }

   public ContainerInfo getContainer(long containerID) throws IOException {
@@ -133,6 +136,25 @@ public ContainerInfo getContainer(long containerID) throws IOException {
     }
   }

+  /**
+   * {@inheritDoc}
+   */
+  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+    Preconditions.checkState(containerID >= 0,
+        "Container ID cannot be negative");
+    GetContainerWithPipelineRequestProto request = GetContainerWithPipelineRequestProto
+        .newBuilder()
+        .setContainerID(containerID)
+        .build();
+    try {
+      GetContainerWithPipelineResponseProto response =
+          rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
+      return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   /**
    * {@inheritDoc}
    */
@@ -21,7 +21,10 @@
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
@@ -82,10 +85,11 @@ public StorageContainerLocationProtocolServerSideTranslatorPB(
   public ContainerResponseProto allocateContainer(RpcController unused,
       ContainerRequestProto request) throws ServiceException {
     try {
-      ContainerInfo container = impl.allocateContainer(request.getReplicationType(),
-          request.getReplicationFactor(), request.getOwner());
+      ContainerWithPipeline containerWithPipeline = impl
+          .allocateContainer(request.getReplicationType(),
+              request.getReplicationFactor(), request.getOwner());
       return ContainerResponseProto.newBuilder()
-          .setContainerInfo(container.getProtobuf())
+          .setContainerWithPipeline(containerWithPipeline.getProtobuf())
           .setErrorCode(ContainerResponseProto.Error.success)
           .build();

@@ -108,6 +112,21 @@ public GetContainerResponseProto getContainer(
     }
   }

+  @Override
+  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
+      RpcController controller, GetContainerWithPipelineRequestProto request)
+      throws ServiceException {
+    try {
+      ContainerWithPipeline container = impl
+          .getContainerWithPipeline(request.getContainerID());
+      return GetContainerWithPipelineResponseProto.newBuilder()
+          .setContainerWithPipeline(container.getProtobuf())
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public SCMListContainerResponseProto listContainer(RpcController controller,
       SCMListContainerRequestProto request) throws ServiceException {
@@ -52,7 +52,7 @@ message ContainerResponseProto {
     errorContainerMissing = 3;
   }
   required Error errorCode = 1;
-  required SCMContainerInfo containerInfo = 2;
+  required ContainerWithPipeline containerWithPipeline = 2;
   optional string errorMessage = 3;
 }

@@ -64,6 +64,14 @@ message GetContainerResponseProto {
   required SCMContainerInfo containerInfo = 1;
 }

+message GetContainerWithPipelineRequestProto {
+  required int64 containerID = 1;
+}
+
+message GetContainerWithPipelineResponseProto {
+  required ContainerWithPipeline containerWithPipeline = 1;
+}
+
 message SCMListContainerRequestProto {
   required uint32 count = 1;
   optional uint64 startContainerID = 2;
@@ -171,6 +179,11 @@ service StorageContainerLocationProtocolService {
   */
   rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);

+  /**
+   * Returns the pipeline for a given container.
+   */
+  rpc getContainerWithPipeline(GetContainerWithPipelineRequestProto) returns (GetContainerWithPipelineResponseProto);
+
   rpc listContainer(SCMListContainerRequestProto) returns (SCMListContainerResponseProto);

   /**
@@ -132,7 +132,7 @@ enum LifeCycleEvent {
 message SCMContainerInfo {
   required int64 containerID = 1;
   required LifeCycleState state = 2;
-  required Pipeline pipeline = 3;
+  optional string pipelineName = 3;
   // This is not total size of container, but space allocated by SCM for
   // clients to write blocks
   required uint64 allocatedBytes = 4;
@@ -141,6 +141,13 @@ message SCMContainerInfo {
   optional int64 stateEnterTime = 7;
   required string owner = 8;
   optional int64 deleteTransactionId = 9;
+  required ReplicationFactor replicationFactor = 10;
+  required ReplicationType replicationType = 11;
+}
+
+message ContainerWithPipeline {
+  required SCMContainerInfo containerInfo = 1;
+  required Pipeline pipeline = 2;
 }

 message GetScmInfoRequestProto {
@@ -16,10 +16,12 @@
  */
 package org.apache.hadoop.hdds.scm.block;

+import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -156,13 +158,13 @@ private void preAllocateContainers(int count, ReplicationType type,
     lock.lock();
     try {
       for (int i = 0; i < count; i++) {
-        ContainerInfo containerInfo = null;
+        ContainerWithPipeline containerWithPipeline = null;
         try {
           // TODO: Fix this later when Ratis is made the Default.
-          containerInfo = containerManager.allocateContainer(type, factor,
+          containerWithPipeline = containerManager.allocateContainer(type, factor,
               owner);

-          if (containerInfo == null) {
+          if (containerWithPipeline == null) {
             LOG.warn("Unable to allocate container.");
             continue;
           }
@@ -231,30 +233,27 @@ public AllocatedBlock allocateBlock(final long size,
        can use different kind of policies.
     */

-    ContainerInfo containerInfo;
+    ContainerWithPipeline containerWithPipeline;

     // Look for ALLOCATED container that matches all other parameters.
-    containerInfo =
-        containerManager
-            .getStateManager()
-            .getMatchingContainer(
-                size, owner, type, factor, HddsProtos.LifeCycleState
-                    .ALLOCATED);
-    if (containerInfo != null) {
-      containerManager.updateContainerState(containerInfo.getContainerID(),
+    containerWithPipeline = containerManager
+        .getMatchingContainerWithPipeline(size, owner, type, factor,
+            HddsProtos.LifeCycleState.ALLOCATED);
+    if (containerWithPipeline != null) {
+      containerManager.updateContainerState(
+          containerWithPipeline.getContainerInfo().getContainerID(),
           HddsProtos.LifeCycleEvent.CREATE);
-      return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED);
+      return newBlock(containerWithPipeline,
+          HddsProtos.LifeCycleState.ALLOCATED);
     }

     // Since we found no allocated containers that match our criteria, let us
     // look for OPEN containers that match the criteria.
-    containerInfo =
-        containerManager
-            .getStateManager()
-            .getMatchingContainer(size, owner, type, factor, HddsProtos
-                .LifeCycleState.OPEN);
-    if (containerInfo != null) {
-      return newBlock(containerInfo, HddsProtos.LifeCycleState.OPEN);
+    containerWithPipeline = containerManager
+        .getMatchingContainerWithPipeline(size, owner, type, factor,
+            HddsProtos.LifeCycleState.OPEN);
+    if (containerWithPipeline != null) {
+      return newBlock(containerWithPipeline, HddsProtos.LifeCycleState.OPEN);
     }

     // We found neither ALLOCATED or OPEN Containers. This generally means
@@ -264,16 +263,15 @@ public AllocatedBlock allocateBlock(final long size,
     preAllocateContainers(containerProvisionBatchSize, type, factor, owner);

     // Since we just allocated a set of containers this should work
-    containerInfo =
-        containerManager
-            .getStateManager()
-            .getMatchingContainer(
-                size, owner, type, factor, HddsProtos.LifeCycleState
-                    .ALLOCATED);
-    if (containerInfo != null) {
-      containerManager.updateContainerState(containerInfo.getContainerID(),
+    containerWithPipeline = containerManager
+        .getMatchingContainerWithPipeline(size, owner, type, factor,
+            HddsProtos.LifeCycleState.ALLOCATED);
+    if (containerWithPipeline != null) {
+      containerManager.updateContainerState(
+          containerWithPipeline.getContainerInfo().getContainerID(),
           HddsProtos.LifeCycleEvent.CREATE);
-      return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED);
+      return newBlock(containerWithPipeline,
+          HddsProtos.LifeCycleState.ALLOCATED);
     }

     // we have tried all strategies we know and but somehow we are not able
@@ -290,18 +288,28 @@ public AllocatedBlock allocateBlock(final long size,
     }
   }

+  private String getChannelName(ReplicationType type) {
+    switch (type) {
+    case RATIS:
+      return "RA" + UUID.randomUUID().toString().substring(3);
+    case STAND_ALONE:
+      return "SA" + UUID.randomUUID().toString().substring(3);
+    default:
+      return "RA" + UUID.randomUUID().toString().substring(3);
+    }
+  }
+
   /**
    * newBlock - returns a new block assigned to a container.
    *
-   * @param containerInfo - Container Info.
+   * @param containerWithPipeline - Container Info.
    * @param state - Current state of the container.
    * @return AllocatedBlock
    */
-  private AllocatedBlock newBlock(
-      ContainerInfo containerInfo, HddsProtos.LifeCycleState state)
-      throws IOException {
-    if (containerInfo.getPipeline().getMachines().size() == 0) {
+  private AllocatedBlock newBlock(ContainerWithPipeline containerWithPipeline,
+      HddsProtos.LifeCycleState state) throws IOException {
+    ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
+    if (containerWithPipeline.getPipeline().getDatanodes().size() == 0) {
       LOG.error("Pipeline Machine count is zero.");
       return null;
     }
@ -317,7 +325,7 @@ private AllocatedBlock newBlock(
|
|||||||
AllocatedBlock.Builder abb =
|
AllocatedBlock.Builder abb =
|
||||||
new AllocatedBlock.Builder()
|
new AllocatedBlock.Builder()
|
||||||
.setBlockID(new BlockID(containerID, localID))
|
.setBlockID(new BlockID(containerID, localID))
|
||||||
.setPipeline(containerInfo.getPipeline())
|
.setPipeline(containerWithPipeline.getPipeline())
|
||||||
.setShouldCreateContainer(createContainer);
|
.setShouldCreateContainer(createContainer);
|
||||||
LOG.trace("New block allocated : {} Container ID: {}", localID,
|
LOG.trace("New block allocated : {} Container ID: {}", localID,
|
||||||
containerID);
|
containerID);
|
||||||
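
The shape that makes these hunks work is the new ContainerWithPipeline helper: ContainerInfo now records only the pipeline name plus replication type and factor, and callers that need live datanode addresses receive the Pipeline object alongside it. A rough, self-contained sketch of that pairing follows; the types here are simplified stand-ins, not the actual classes under org.apache.hadoop.hdds.scm.container.common.helpers.

// Sketch only: minimal stand-ins for ContainerInfo, Pipeline and the
// ContainerWithPipeline pair introduced by this change. The real classes
// carry far more state (lifecycle, usage counters, protobuf conversion).
final class ContainerWithPipelineSketch {
  static final class ContainerInfo {
    private final long containerID;
    private final String pipelineName; // reference by name, not by object
    ContainerInfo(long containerID, String pipelineName) {
      this.containerID = containerID;
      this.pipelineName = pipelineName;
    }
    long getContainerID() { return containerID; }
    String getPipelineName() { return pipelineName; }
  }

  static final class Pipeline {
    private final String pipelineName;
    Pipeline(String pipelineName) { this.pipelineName = pipelineName; }
    String getPipelineName() { return pipelineName; }
  }

  private final ContainerInfo containerInfo;
  private final Pipeline pipeline;

  ContainerWithPipelineSketch(ContainerInfo containerInfo, Pipeline pipeline) {
    this.containerInfo = containerInfo;
    this.pipeline = pipeline;
  }

  ContainerInfo getContainerInfo() { return containerInfo; }
  Pipeline getPipeline() { return pipeline; }
}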
@@ -18,7 +18,6 @@

 import com.google.common.collect.ArrayListMultimap;
 import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
@@ -29,6 +28,7 @@
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

 /**
  * A wrapper class to hold info about datanode and all deleted block
@@ -54,21 +54,22 @@ public class DatanodeDeletedBlockTransactions {
   }

   public void addTransaction(DeletedBlocksTransaction tx) throws IOException {
-    ContainerInfo info = null;
+    Pipeline pipeline = null;
     try {
-      info = mappingService.getContainer(tx.getContainerID());
+      pipeline = mappingService.getContainerWithPipeline(tx.getContainerID())
+          .getPipeline();
     } catch (IOException e) {
       SCMBlockDeletingService.LOG.warn("Got container info error.", e);
     }

-    if (info == null) {
+    if (pipeline == null) {
       SCMBlockDeletingService.LOG.warn(
           "Container {} not found, continue to process next",
           tx.getContainerID());
       return;
     }

-    for (DatanodeDetails dd : info.getPipeline().getMachines()) {
+    for (DatanodeDetails dd : pipeline.getMachines()) {
       UUID dnID = dd.getUuid();
       if (transactions.containsKey(dnID)) {
         List<DeletedBlocksTransaction> txs = transactions.get(dnID);
@@ -16,9 +16,11 @@
  */
 package org.apache.hadoop.hdds.scm.container;

+import java.io.IOException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -54,22 +56,32 @@ public void onMessage(ContainerID containerID, EventPublisher publisher) {

     LOG.info("Close container Event triggered for container : {}",
         containerID.getId());
-    ContainerStateManager stateManager = containerManager.getStateManager();
-    ContainerInfo info = stateManager.getContainer(containerID);
-    if (info == null) {
-      LOG.info("Container with id : {} does not exist", containerID.getId());
+    ContainerWithPipeline containerWithPipeline = null;
+    ContainerInfo info;
+    try {
+      containerWithPipeline = containerManager.getContainerWithPipeline(containerID.getId());
+      info = containerWithPipeline.getContainerInfo();
+      if (info == null) {
+        LOG.info("Failed to update the container state. Container with id : {} "
+            + "does not exist", containerID.getId());
+        return;
+      }
+    } catch (IOException e) {
+      LOG.info("Failed to update the container state. Container with id : {} "
+          + "does not exist", containerID.getId());
       return;
     }

     if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
-      for (DatanodeDetails datanode : info.getPipeline().getMachines()) {
+      for (DatanodeDetails datanode : containerWithPipeline.getPipeline().getMachines()) {
         containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
             new CloseContainerCommand(containerID.getId(),
-                info.getPipeline().getType()));
+                info.getReplicationType()));
       }
       try {
         // Finalize event will make sure the state of the container transitions
         // from OPEN to CLOSING in containerStateManager.
-        stateManager
+        containerManager.getStateManager()
             .updateContainerState(info, HddsProtos.LifeCycleEvent.FINALIZE);
       } catch (SCMException ex) {
         LOG.error("Failed to update the container state for container : {}"
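
Because the container lookup now has to resolve a pipeline at read time, it can throw an IOException, so the reworked onMessage wraps the fetch and treats a thrown exception the same as a missing container. A compact sketch of that control flow under stand-in names (Lookup and fetchOrNull are hypothetical helpers, not Ozone APIs):

import java.io.IOException;

// Control flow of the reworked handler: fetch, and bail out on either a
// null result or a failed lookup. All names here are stand-ins.
final class CloseFlowSketch {
  interface Lookup<T> { T fetch(long containerId) throws IOException; }

  static <T> T fetchOrNull(Lookup<T> lookup, long containerId) {
    try {
      T result = lookup.fetch(containerId);
      if (result == null) {
        System.out.println("Failed to update the container state. "
            + "Container with id : " + containerId + " does not exist");
      }
      return result;
    } catch (IOException e) {
      // A lookup failure is handled exactly like a missing container.
      System.out.println("Failed to update the container state. "
          + "Container with id : " + containerId + " does not exist");
      return null;
    }
  }
}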
@@ -21,6 +21,10 @@
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -166,6 +170,44 @@ public ContainerInfo getContainer(final long containerID) throws
     }
   }

+  /**
+   * Returns the ContainerInfo from the container ID.
+   *
+   * @param containerID - ID of container.
+   * @return - ContainerWithPipeline such as creation state and the pipeline.
+   * @throws IOException
+   */
+  @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException {
+    ContainerInfo contInfo;
+    lock.lock();
+    try {
+      byte[] containerBytes = containerStore.get(
+          Longs.toByteArray(containerID));
+      if (containerBytes == null) {
+        throw new SCMException(
+            "Specified key does not exist. key : " + containerID,
+            SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
+      }
+      HddsProtos.SCMContainerInfo temp = HddsProtos.SCMContainerInfo.PARSER
+          .parseFrom(containerBytes);
+      contInfo = ContainerInfo.fromProtobuf(temp);
+      Pipeline pipeline = pipelineSelector
+          .getPipeline(contInfo.getPipelineName(),
+              contInfo.getReplicationType());
+
+      if(pipeline == null) {
+        pipeline = pipelineSelector
+            .getReplicationPipeline(contInfo.getReplicationType(),
+                contInfo.getReplicationFactor());
+      }
+      return new ContainerWithPipeline(contInfo, pipeline);
+    } finally {
+      lock.unlock();
+    }
+  }
+
   /**
    * {@inheritDoc}
    */
@@ -208,13 +250,15 @@ public List<ContainerInfo> listContainer(long startContainerID,
    * @throws IOException - Exception
    */
   @Override
-  public ContainerInfo allocateContainer(
+  public ContainerWithPipeline allocateContainer(
       ReplicationType type,
       ReplicationFactor replicationFactor,
      String owner)
      throws IOException {

     ContainerInfo containerInfo;
+    ContainerWithPipeline containerWithPipeline;

     if (!nodeManager.isOutOfChillMode()) {
       throw new SCMException(
           "Unable to create container while in chill mode",
@@ -223,9 +267,9 @@ public ContainerInfo allocateContainer(

     lock.lock();
     try {
-      containerInfo =
-          containerStateManager.allocateContainer(
+      containerWithPipeline = containerStateManager.allocateContainer(
               pipelineSelector, type, replicationFactor, owner);
+      containerInfo = containerWithPipeline.getContainerInfo();

       byte[] containerIDBytes = Longs.toByteArray(
           containerInfo.getContainerID());
@@ -234,7 +278,7 @@ public ContainerInfo allocateContainer(
     } finally {
       lock.unlock();
     }
-    return containerInfo;
+    return containerWithPipeline;
   }

   /**
@@ -380,6 +424,35 @@ public ContainerStateManager getStateManager() {
     return containerStateManager;
   }

+  /**
+   * Return a container matching the attributes specified.
+   *
+   * @param size - Space needed in the Container.
+   * @param owner - Owner of the container - A specific nameservice.
+   * @param type - Replication Type {StandAlone, Ratis}
+   * @param factor - Replication Factor {ONE, THREE}
+   * @param state - State of the Container-- {Open, Allocated etc.}
+   * @return ContainerInfo, null if there is no match found.
+   */
+  public ContainerWithPipeline getMatchingContainerWithPipeline(final long size,
+      String owner, ReplicationType type, ReplicationFactor factor,
+      LifeCycleState state) throws IOException {
+    ContainerInfo containerInfo = getStateManager()
+        .getMatchingContainer(size, owner, type, factor, state);
+    if (containerInfo == null) {
+      return null;
+    }
+    Pipeline pipeline = pipelineSelector
+        .getPipeline(containerInfo.getPipelineName(),
+            containerInfo.getReplicationType());
+    if (pipeline == null) {
+      pipelineSelector
+          .getReplicationPipeline(containerInfo.getReplicationType(),
+              containerInfo.getReplicationFactor());
+    }
+    return new ContainerWithPipeline(containerInfo, pipeline);
+  }
+
   /**
    * Process container report from Datanode.
    * <p>
@@ -415,7 +488,7 @@ public void processContainerReports(DatanodeDetails datanodeDetails,
               HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);

           HddsProtos.SCMContainerInfo newState =
-              reconcileState(datanodeState, knownState);
+              reconcileState(datanodeState, knownState, datanodeDetails);

           // FIX ME: This can be optimized, we write twice to memory, where a
           // single write would work well.
@@ -425,8 +498,14 @@ public void processContainerReports(DatanodeDetails datanodeDetails,
           containerStore.put(dbKey, newState.toByteArray());

           // If the container is closed, then state is already written to SCM
+          Pipeline pipeline = pipelineSelector.getPipeline(newState.getPipelineName(), newState.getReplicationType());
+          if(pipeline == null) {
+            pipeline = pipelineSelector
+                .getReplicationPipeline(newState.getReplicationType(),
+                    newState.getReplicationFactor());
+          }
           // DB.TODO: So can we can write only once to DB.
-          if (closeContainerIfNeeded(newState)) {
+          if (closeContainerIfNeeded(newState, pipeline)) {
             LOG.info("Closing the Container: {}", newState.getContainerID());
           }
         } else {
@@ -447,15 +526,22 @@ public void processContainerReports(DatanodeDetails datanodeDetails,
    *
    * @param datanodeState - State from the Datanode.
    * @param knownState - State inside SCM.
+   * @param dnDetails
    * @return new SCM State for this container.
    */
   private HddsProtos.SCMContainerInfo reconcileState(
       StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState,
-      HddsProtos.SCMContainerInfo knownState) {
+      SCMContainerInfo knownState, DatanodeDetails dnDetails) {
     HddsProtos.SCMContainerInfo.Builder builder =
         HddsProtos.SCMContainerInfo.newBuilder();
-    builder.setContainerID(knownState.getContainerID());
-    builder.setPipeline(knownState.getPipeline());
+    builder.setContainerID(knownState.getContainerID())
+        .setPipelineName(knownState.getPipelineName())
+        .setReplicationType(knownState.getReplicationType())
+        .setReplicationFactor(knownState.getReplicationFactor());
+
+    // TODO: If current state doesn't have this DN in list of DataNodes with replica
+    // then add it in list of replicas.
+
     // If used size is greater than allocated size, we will be updating
     // allocated size with used size. This update is done as a fallback
     // mechanism in case SCM crashes without properly updating allocated
@@ -464,13 +550,13 @@ private HddsProtos.SCMContainerInfo reconcileState(
     long usedSize = datanodeState.getUsed();
     long allocated = knownState.getAllocatedBytes() > usedSize ?
         knownState.getAllocatedBytes() : usedSize;
-    builder.setAllocatedBytes(allocated);
-    builder.setUsedBytes(usedSize);
-    builder.setNumberOfKeys(datanodeState.getKeyCount());
-    builder.setState(knownState.getState());
-    builder.setStateEnterTime(knownState.getStateEnterTime());
-    builder.setContainerID(knownState.getContainerID());
-    builder.setDeleteTransactionId(knownState.getDeleteTransactionId());
+    builder.setAllocatedBytes(allocated)
+        .setUsedBytes(usedSize)
+        .setNumberOfKeys(datanodeState.getKeyCount())
+        .setState(knownState.getState())
+        .setStateEnterTime(knownState.getStateEnterTime())
+        .setContainerID(knownState.getContainerID())
+        .setDeleteTransactionId(knownState.getDeleteTransactionId());
     if (knownState.getOwner() != null) {
       builder.setOwner(knownState.getOwner());
     }
@@ -485,9 +571,11 @@ private HddsProtos.SCMContainerInfo reconcileState(
    * one protobuf in one file and another definition in another file.
    *
    * @param newState - This is the state we maintain in SCM.
+   * @param pipeline
    * @throws IOException
    */
-  private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState)
+  private boolean closeContainerIfNeeded(SCMContainerInfo newState,
+      Pipeline pipeline)
       throws IOException {
     float containerUsedPercentage = 1.0f *
         newState.getUsedBytes() / this.size;
@@ -498,7 +586,7 @@ private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState)
       // We will call closer till get to the closed state.
       // That is SCM will make this call repeatedly until we reach the closed
      // state.
-      closer.close(newState);
+      closer.close(newState, pipeline);

       if (shouldClose(scmInfo)) {
         // This event moves the Container from Open to Closing State, this is
@@ -598,10 +686,12 @@ public void flushContainerInfo() throws IOException {
             .setAllocatedBytes(info.getAllocatedBytes())
             .setNumberOfKeys(oldInfo.getNumberOfKeys())
             .setOwner(oldInfo.getOwner())
-            .setPipeline(oldInfo.getPipeline())
+            .setPipelineName(oldInfo.getPipelineName())
             .setState(oldInfo.getState())
             .setUsedBytes(oldInfo.getUsedBytes())
             .setDeleteTransactionId(oldInfo.getDeleteTransactionId())
+            .setReplicationFactor(oldInfo.getReplicationFactor())
+            .setReplicationType(oldInfo.getReplicationType())
             .build();
         containerStore.put(dbKey, newInfo.getProtobuf().toByteArray());
       } else {
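
Both new lookups on ContainerMapping follow the same resolve-or-fallback shape: prefer the pipeline recorded on the container by name, and fall back to a fresh replication pipeline of the same type and factor when the named one is no longer active. A hedged, generic sketch of that shape, where the map and supplier stand in for PipelineSelector (note the sketch assigns the fallback result before returning):

import java.util.Map;
import java.util.function.Supplier;

// Resolve-or-fallback pipeline lookup, generalized from the new
// getContainerWithPipeline / getMatchingContainerWithPipeline methods.
final class PipelineLookupSketch {
  static <P> P resolvePipeline(Map<String, P> activePipelines,
      String pipelineName, Supplier<P> freshReplicationPipeline) {
    // Prefer the pipeline the container was created on.
    P pipeline = (pipelineName == null)
        ? null : activePipelines.get(pipelineName);
    if (pipeline == null) {
      // The named pipeline is gone; ask for a fresh one of the same
      // type and factor (the role getReplicationPipeline plays above).
      pipeline = freshReplicationPipeline.get();
    }
    return pipeline;
  }
}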
@@ -20,6 +20,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
@@ -279,10 +280,10 @@ private void initializeStateMachine() {
    * @param selector -- Pipeline selector class.
    * @param type -- Replication type.
    * @param replicationFactor - Replication replicationFactor.
-   * @return Container Info.
+   * @return ContainerWithPipeline
    * @throws IOException on Failure.
    */
-  public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos
+  public ContainerWithPipeline allocateContainer(PipelineSelector selector, HddsProtos
       .ReplicationType type, HddsProtos.ReplicationFactor replicationFactor,
       String owner) throws IOException {

@@ -295,7 +296,7 @@ public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos

     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(HddsProtos.LifeCycleState.ALLOCATED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
        // This is bytes allocated for blocks inside container, not the
        // container size
        .setAllocatedBytes(0)
@@ -305,11 +306,13 @@ public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos
         .setOwner(owner)
         .setContainerID(containerCount.incrementAndGet())
         .setDeleteTransactionId(0)
+        .setReplicationFactor(replicationFactor)
+        .setReplicationType(pipeline.getType())
         .build();
     Preconditions.checkNotNull(containerInfo);
     containers.addContainer(containerInfo);
     LOG.trace("New container allocated: {}", containerInfo);
-    return containerInfo;
+    return new ContainerWithPipeline(containerInfo, pipeline);
   }

   /**
@@ -432,8 +435,8 @@ private ContainerInfo findContainerWithSpace(long size,
     containerInfo.updateLastUsedTime();

     ContainerState key = new ContainerState(owner,
-        containerInfo.getPipeline().getType(),
-        containerInfo.getPipeline().getFactor());
+        containerInfo.getReplicationType(),
+        containerInfo.getReplicationFactor());
     lastUsedMap.put(key, containerInfo.containerID());
     return containerInfo;
   }
@@ -457,6 +460,20 @@ public NavigableSet<ContainerID> getMatchingContainerIDs(
         factor, type);
   }

+  /**
+   * Returns the containerInfo with pipeline for the given container id.
+   * @param selector -- Pipeline selector class.
+   * @param containerID id of the container
+   * @return ContainerInfo containerInfo
+   * @throws IOException
+   */
+  public ContainerWithPipeline getContainer(PipelineSelector selector,
+      ContainerID containerID) throws IOException {
+    ContainerInfo info = containers.getContainerInfo(containerID.getId());
+    Pipeline pipeline = selector.getPipeline(info.getPipelineName(), info.getReplicationType());
+    return new ContainerWithPipeline(info, pipeline);
+  }
+
   /**
    * Returns the containerInfo for the given container id.
    * @param containerID id of the container
@@ -466,6 +483,7 @@ public NavigableSet<ContainerID> getMatchingContainerIDs(
   public ContainerInfo getContainer(ContainerID containerID) {
     return containers.getContainerInfo(containerID.getId());
   }
+
   @Override
   public void close() throws IOException {
   }
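
Since allocateContainer now hands back the pair, call sites unpack it once: durable state comes off the ContainerInfo half, and anything that must reach datanodes goes through the Pipeline half. A small sketch of that call-site pattern, reusing the stand-in pair type sketched earlier (not the real Ozone classes):

// Call-site pattern after the refactor: unpack the pair once.
// ContainerWithPipelineSketch is the stand-in pair type from the
// earlier sketch; this is illustrative, not production code.
final class AllocateCallSite {
  static long useAllocation(ContainerWithPipelineSketch allocated) {
    // Durable identity and state live on the ContainerInfo half...
    long containerId = allocated.getContainerInfo().getContainerID();
    // ...while wire-level work uses the Pipeline half.
    String pipelineName = allocated.getPipeline().getPipelineName();
    System.out.println("container " + containerId + " on " + pipelineName);
    return containerId;
  }
}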
@@ -17,6 +17,10 @@
 package org.apache.hadoop.hdds.scm.container;

 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -42,6 +46,16 @@ public interface Mapping extends Closeable {
    */
   ContainerInfo getContainer(long containerID) throws IOException;

+  /**
+   * Returns the ContainerInfo from the container ID.
+   *
+   * @param containerID - ID of container.
+   * @return - ContainerWithPipeline such as creation state and the pipeline.
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException;
+
   /**
    * Returns containers under certain conditions.
    * Search container IDs from start ID(exclusive),
@@ -65,10 +79,10 @@ List<ContainerInfo> listContainer(long startContainerID, int count)
    *
    * @param replicationFactor - replication factor of the container.
    * @param owner
-   * @return - Container Info.
+   * @return - ContainerWithPipeline.
    * @throws IOException
    */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
+  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor replicationFactor, String owner)
       throws IOException;

@@ -120,4 +134,12 @@ void updateDeleteTransactionId(Map<Long, Long> deleteTransactionMap)
    * @return NodeManager
    */
   NodeManager getNodeManager();
+
+  /**
+   * Returns the ContainerWithPipeline.
+   * @return NodeManager
+   */
+  public ContainerWithPipeline getMatchingContainerWithPipeline(final long size,
+      String owner, ReplicationType type, ReplicationFactor factor,
+      LifeCycleState state) throws IOException;
 }
@@ -22,6 +22,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -90,8 +92,10 @@ public static int getCleanupWaterMark() {
    * lives.
    *
    * @param info - ContainerInfo.
+   * @param pipeline
    */
-  public void close(HddsProtos.SCMContainerInfo info) {
+  public void close(SCMContainerInfo info,
+      Pipeline pipeline) {

     if (commandIssued.containsKey(info.getContainerID())) {
       // We check if we issued a close command in last 3 * reportInterval secs.
@@ -126,13 +130,10 @@ public void close(HddsProtos.SCMContainerInfo info) {
     // this queue can be emptied by a datanode after a close report is send
     // to SCM. In that case also, data node will ignore this command.

-    HddsProtos.Pipeline pipeline = info.getPipeline();
-    for (HddsProtos.DatanodeDetailsProto datanodeDetails :
-        pipeline.getMembersList()) {
-      nodeManager.addDatanodeCommand(
-          DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(),
+    for (DatanodeDetails datanodeDetails : pipeline.getMachines()) {
+      nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
           new CloseContainerCommand(info.getContainerID(),
-              pipeline.getType()));
+              info.getReplicationType()));
     }
     if (!commandIssued.containsKey(info.getContainerID())) {
       commandIssued.put(info.getContainerID(),
@@ -116,7 +116,8 @@ public ContainerStateMap() {
   public void addContainer(ContainerInfo info)
       throws SCMException {
     Preconditions.checkNotNull(info, "Container Info cannot be null");
-    Preconditions.checkNotNull(info.getPipeline(), "Pipeline cannot be null");
+    Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
+        "ExpectedReplicaCount should be greater than 0");

     try (AutoCloseableLock lock = autoLock.acquire()) {
       ContainerID id = ContainerID.valueof(info.getContainerID());
@@ -129,8 +130,8 @@ public void addContainer(ContainerInfo info)

       lifeCycleStateMap.insert(info.getState(), id);
       ownerMap.insert(info.getOwner(), id);
-      factorMap.insert(info.getPipeline().getFactor(), id);
-      typeMap.insert(info.getPipeline().getType(), id);
+      factorMap.insert(info.getReplicationFactor(), id);
+      typeMap.insert(info.getReplicationType(), id);
       LOG.trace("Created container with {} successfully.", id);
     }
   }
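
ContainerStateMap can keep its factor and type indexes because those attributes now live directly on ContainerInfo instead of behind the Pipeline. A minimal sketch of such a secondary index, offered as a guess at the spirit of factorMap/typeMap rather than their actual implementation:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Secondary index from an attribute (e.g. replication factor or type,
// now first-class fields on ContainerInfo) to the container IDs carrying it.
final class AttributeIndexSketch<A> {
  private final Map<A, Set<Long>> index = new HashMap<>();

  void insert(A attribute, long containerId) {
    index.computeIfAbsent(attribute, k -> new HashSet<>()).add(containerId);
  }

  Set<Long> get(A attribute) {
    return index.getOrDefault(attribute, new HashSet<>());
  }
}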
@@ -16,6 +16,9 @@
  */
 package org.apache.hadoop.hdds.scm.pipelines;

+import java.util.LinkedList;
+import java.util.Map;
+import java.util.WeakHashMap;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -25,7 +28,6 @@
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;

@@ -36,11 +38,13 @@ public abstract class PipelineManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineManager.class);
   private final List<Pipeline> activePipelines;
+  private final Map<String, Pipeline> activePipelineMap;
   private final AtomicInteger pipelineIndex;

   public PipelineManager() {
     activePipelines = new LinkedList<>();
     pipelineIndex = new AtomicInteger(0);
+    activePipelineMap = new WeakHashMap<>();
   }

   /**
@@ -76,6 +80,7 @@ public synchronized final Pipeline getPipeline(
             "replicationType:{} replicationFactor:{}",
             pipeline.getPipelineName(), replicationType, replicationFactor);
         activePipelines.add(pipeline);
+        activePipelineMap.put(pipeline.getPipelineName(), pipeline);
       } else {
         pipeline =
             findOpenPipeline(replicationType, replicationFactor);
@@ -94,6 +99,26 @@ public synchronized final Pipeline getPipeline(
     }
   }

+  /**
+   * This function to get pipeline with given pipeline name.
+   *
+   * @param pipelineName
+   * @return a Pipeline.
+   */
+  public synchronized final Pipeline getPipeline(String pipelineName) {
+    Pipeline pipeline = null;
+
+    // 1. Check if pipeline channel already exists
+    if (activePipelineMap.containsKey(pipelineName)) {
+      pipeline = activePipelineMap.get(pipelineName);
+      LOG.debug("Returning pipeline for pipelineName:{}", pipelineName);
+      return pipeline;
+    } else {
+      LOG.debug("Unable to find pipeline for pipelineName:{}", pipelineName);
+    }
+    return pipeline;
+  }
+
   protected int getReplicationCount(ReplicationFactor factor) {
     switch (factor) {
     case ONE:
@@ -19,6 +19,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -176,6 +177,21 @@ public Pipeline getReplicationPipeline(ReplicationType replicationType,
         getPipeline(replicationFactor, replicationType);
   }

+  /**
+   * This function to return pipeline for given pipeline name and replication
+   * type.
+   */
+  public Pipeline getPipeline(String pipelineName,
+      ReplicationType replicationType) throws IOException {
+    if (pipelineName == null) {
+      return null;
+    }
+    PipelineManager manager = getPipelineManager(replicationType);
+    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
+    LOG.debug("Getting replication pipeline forReplicationType {} :" +
+        " pipelineName:{}", replicationType, pipelineName);
+    return manager.getPipeline(pipelineName);
+  }
   /**
    * Creates a pipeline from a specified set of Nodes.
    */
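
The new activePipelineMap gives PipelineManager a name-keyed lookup next to the round-robin list, which is what PipelineSelector.getPipeline(pipelineName, replicationType) leans on. A usage sketch of that index with a stand-in Pipeline type; note the diff's choice of WeakHashMap means an entry can drop out once nothing else references its name key, so a miss is expected to fall through to the caller's fallback path:

import java.util.Map;
import java.util.WeakHashMap;

// Name-keyed pipeline index beside the primary list, mirroring the
// activePipelineMap added in this change. Pipeline is a stand-in type.
final class PipelineIndexSketch {
  static final class Pipeline {
    private final String name;
    Pipeline(String name) { this.name = name; }
    String getPipelineName() { return name; }
  }

  private final Map<String, Pipeline> activePipelineMap = new WeakHashMap<>();

  synchronized void register(Pipeline pipeline) {
    activePipelineMap.put(pipeline.getPipelineName(), pipeline);
  }

  // Same shape as the new getPipeline(String): hit the map, else null.
  synchronized Pipeline getPipeline(String pipelineName) {
    return activePipelineMap.get(pipelineName);
  }
}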
@@ -19,6 +19,7 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.XceiverClientRatis;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.scm.pipelines.standalone;

 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -31,6 +31,7 @@
     .StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -145,11 +146,12 @@ public String getRpcRemoteUsername() {
   }

   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType
+  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
       String owner) throws IOException {
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
+
     return scm.getScmContainerManager()
         .allocateContainer(replicationType, factor, owner);
   }
@@ -162,6 +164,14 @@ public ContainerInfo getContainer(long containerID) throws IOException {
         .getContainer(containerID);
   }

+  @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+    String remoteUser = getRpcRemoteUsername();
+    getScm().checkAdminAccess(remoteUser);
+    return scm.getScmContainerManager()
+        .getContainerWithPipeline(containerID);
+  }
+
   @Override
   public List<ContainerInfo> listContainer(long startContainerID,
       int count) throws IOException {
@@ -248,7 +258,7 @@ public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
       throws IOException {
     // TODO: will be addressed in future patch.
     // This is needed only for debugging purposes to make sure cluster is
     // working correctly.
     return null;
   }

@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -362,10 +363,16 @@ private void mockContainerInfo(Mapping mappingService, long containerID,
     pipeline.addMember(dd);

     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipeline(pipeline);
+    builder.setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor());

-    ContainerInfo conatinerInfo = builder.build();
-    Mockito.doReturn(conatinerInfo).when(mappingService)
+    ContainerInfo containerInfo = builder.build();
+    ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
+        containerInfo, pipeline);
+    Mockito.doReturn(containerInfo).when(mappingService)
         .getContainer(containerID);
+    Mockito.doReturn(containerWithPipeline).when(mappingService)
+        .getContainerWithPipeline(containerID);
   }
 }
@@ -21,7 +21,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -97,7 +97,7 @@ public void testCloseContainerEventWithInvalidContainer() {
         new ContainerID(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
-        .contains("Container with id : " + id + " does not exist"));
+        .contains("Failed to update the container state"));
   }

   @Test
@@ -105,11 +105,12 @@ public void testCloseContainerEventWithValidContainers() throws IOException {

     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerInfo info = mapping
+    ContainerWithPipeline containerWithPipeline = mapping
         .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
             HddsProtos.ReplicationFactor.ONE, "ozone");
-    ContainerID id = new ContainerID(info.getContainerID());
-    DatanodeDetails datanode = info.getPipeline().getLeader();
+    ContainerID id = new ContainerID(
+        containerWithPipeline.getContainerInfo().getContainerID());
+    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     int closeCount = nodeManager.getCommandCount(datanode);
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
@@ -125,7 +126,8 @@ public void testCloseContainerEventWithValidContainers() throws IOException {
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(info.getContainerID()));
+        new ContainerID(
+            containerWithPipeline.getContainerInfo().getContainerID()));
     eventQueue.processAll(1000);
     Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode));
     Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
@@ -137,20 +139,23 @@ public void testCloseContainerEventWithRatis() throws IOException {

     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerInfo info = mapping
+    ContainerWithPipeline containerWithPipeline = mapping
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, "ozone");
-    ContainerID id = new ContainerID(info.getContainerID());
+    ContainerID id = new ContainerID(
+        containerWithPipeline.getContainerInfo().getContainerID());
     int[] closeCount = new int[3];
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
     int i = 0;
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       closeCount[i] = nodeManager.getCommandCount(details);
       i++;
     }
     i = 0;
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
       i++;
     }
@@ -161,12 +166,12 @@ public void testCloseContainerEventWithRatis() throws IOException {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(info.getContainerID()));
+    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
     i = 0;
     // Make sure close is queued for each datanode on the pipeline
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       Assert.assertEquals(closeCount[i] + 1,
           nodeManager.getCommandCount(details));
       Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -103,7 +104,7 @@ public void clearChillMode() {

   @Test
   public void testallocateContainer() throws Exception {
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
@@ -120,7 +121,7 @@ public void testallocateContainerDistributesAllocation() throws Exception {
      */
     Set<UUID> pipelineList = new TreeSet<>();
     for (int x = 0; x < 30; x++) {
-      ContainerInfo containerInfo = mapping.allocateContainer(
+      ContainerWithPipeline containerInfo = mapping.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(),
           containerOwner);
@@ -135,14 +136,13 @@ public void testallocateContainerDistributesAllocation() throws Exception {

   @Test
   public void testGetContainer() throws IOException {
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
     Pipeline pipeline = containerInfo.getPipeline();
     Assert.assertNotNull(pipeline);
-    Pipeline newPipeline = mapping.getContainer(
-        containerInfo.getContainerID()).getPipeline();
+    Pipeline newPipeline = containerInfo.getPipeline();
     Assert.assertEquals(pipeline.getLeader().getUuid(),
         newPipeline.getLeader().getUuid());
   }
@@ -165,12 +165,12 @@ public void testChillModeAllocateContainerFails() throws IOException {
   public void testContainerCreationLeaseTimeout() throws IOException,
       InterruptedException {
     nodeManager.setChillmode(false);
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    mapping.updateContainerState(containerInfo.getContainerInfo()
+        .getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
     Thread.sleep(TIMEOUT + 1000);

     NavigableSet<ContainerID> deleteContainers = mapping.getStateManager()
@@ -179,12 +179,14 @@ public void testContainerCreationLeaseTimeout() throws IOException,
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.DELETING);
-    Assert.assertTrue(deleteContainers.contains(containerInfo.containerID()));
+    Assert.assertTrue(deleteContainers
+        .contains(containerInfo.getContainerInfo().containerID()));

     thrown.expect(IOException.class);
     thrown.expectMessage("Lease Exception");
-    mapping.updateContainerState(containerInfo.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    mapping
+        .updateContainerState(containerInfo.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
   }

   @Test
@@ -294,10 +296,11 @@ public void testCloseContainer() throws IOException {
   private ContainerInfo createContainer()
       throws IOException {
     nodeManager.setChillmode(false);
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
+    ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
     mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
     mapping.updateContainerState(containerInfo.getContainerID(),
|
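The change repeated across these tests: allocateContainer now returns a ContainerWithPipeline wrapper rather than a bare ContainerInfo, so callers unwrap the container record and its pipeline separately. A minimal sketch of the migrated call site (illustrative only; mapping and xceiverClientManager are the test fixtures used above):

    // Illustrative sketch; 'mapping' and 'xceiverClientManager' are the
    // test fixtures assumed by the surrounding diff.
    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
        xceiverClientManager.getType(),
        xceiverClientManager.getFactor(),
        containerOwner);
    // Container metadata and its pipeline are now unwrapped separately.
    ContainerInfo info = containerWithPipeline.getContainerInfo();
    Pipeline pipeline = containerWithPipeline.getPipeline();
    // State transitions key off the ID carried by ContainerInfo.
    mapping.updateContainerState(info.getContainerID(),
        HddsProtos.LifeCycleEvent.CREATE);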
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.TestContainerMapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -91,9 +92,10 @@ public static void tearDown() throws Exception {

   @Test
   public void testClose() throws IOException {
-    ContainerInfo info = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
         HddsProtos.ReplicationFactor.ONE, "ozone");
+    ContainerInfo info = containerWithPipeline.getContainerInfo();

     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -101,7 +103,7 @@ public void testClose() throws IOException {
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();

-    DatanodeDetails datanode = info.getPipeline().getLeader();
+    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     // Send a container report with used set to 1 GB. This should not close.
     sendContainerReport(info, 1 * GIGABYTE);

@@ -138,9 +140,10 @@ public void testRepeatedClose() throws IOException,
     configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);

-    ContainerInfo info = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
         HddsProtos.ReplicationFactor.ONE, "ozone");
+    ContainerInfo info = containerWithPipeline.getContainerInfo();

     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -148,10 +151,10 @@ public void testRepeatedClose() throws IOException,
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();

-    DatanodeDetails datanodeDetails = info.getPipeline().getLeader();
-    // Send this command twice and assert we have only one command in the queue.
+    DatanodeDetails datanodeDetails = containerWithPipeline.getPipeline()
+        .getLeader();
+    // Send this command twice and assert we have only one command in queue.
     sendContainerReport(info, 5 * GIGABYTE);
     sendContainerReport(info, 5 * GIGABYTE);

@@ -183,9 +186,10 @@ public void testCleanupThreadRuns() throws IOException,
     long runCount = mapping.getCloser().getThreadRunCount();

     for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) {
-      ContainerInfo info = mapping.allocateContainer(
+      ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
           HddsProtos.ReplicationType.STAND_ALONE,
           HddsProtos.ReplicationFactor.ONE, "ozone");
+      ContainerInfo info = containerWithPipeline.getContainerInfo();
       mapping.updateContainerState(info.getContainerID(), CREATE);
       mapping.updateContainerState(info.getContainerID(), CREATED);
       sendContainerReport(info, 5 * GIGABYTE);
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -151,11 +151,11 @@ public void testContainerPlacementCapacity() throws IOException,

       assertTrue(nodeManager.isOutOfChillMode());

-      ContainerInfo containerInfo = containerManager.allocateContainer(
+      ContainerWithPipeline containerWithPipeline = containerManager.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(), "OZONE");
       assertEquals(xceiverClientManager.getFactor().getNumber(),
-          containerInfo.getPipeline().getMachines().size());
+          containerWithPipeline.getPipeline().getMachines().size());
     } finally {
       IOUtils.closeQuietly(containerManager);
       IOUtils.closeQuietly(nodeManager);
@@ -24,9 +24,9 @@
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

 /**
  * The handler of close container command.
@@ -51,15 +51,15 @@ public void execute(CommandLine cmd) throws IOException {
     }
     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);

-    ContainerInfo container = getScmClient().
-        getContainer(Long.parseLong(containerID));
+    ContainerWithPipeline container = getScmClient().
+        getContainerWithPipeline(Long.parseLong(containerID));
     if (container == null) {
       throw new IOException("Cannot close an non-exist container "
           + containerID);
     }
     logOut("Closing container : %s.", containerID);
-    getScmClient().closeContainer(container.getContainerID(),
-        container.getPipeline());
+    getScmClient()
+        .closeContainer(container.getContainerInfo().getContainerID());
     logOut("Container closed.");
   }

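As the handler above shows, closeContainer no longer takes a pipeline from the caller; the client looks up a ContainerWithPipeline and passes only the container ID, leaving placement resolution to SCM. A condensed sketch of the new flow (scmClient is an assumed ScmClient instance, named here only for illustration):

    // Condensed sketch; 'scmClient' stands in for getScmClient().
    ContainerWithPipeline container =
        scmClient.getContainerWithPipeline(containerID);
    if (container == null) {
      throw new IOException("Cannot close a non-existent container "
          + containerID);
    }
    // The pipeline argument is gone; the container ID alone suffices now.
    scmClient.closeContainer(container.getContainerInfo().getContainerID());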
@@ -25,9 +25,9 @@
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -60,7 +60,7 @@ public void execute(CommandLine cmd) throws IOException {

     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);

-    ContainerInfo container = getScmClient().getContainer(
+    ContainerWithPipeline container = getScmClient().getContainerWithPipeline(
         Long.parseLong(containerID));
     if (container == null) {
       throw new IOException("Cannot delete an non-exist container "
@@ -68,8 +68,9 @@ public void execute(CommandLine cmd) throws IOException {
     }

     logOut("Deleting container : %s.", containerID);
-    getScmClient().deleteContainer(container.getContainerID(),
-        container.getPipeline(), cmd.hasOption(OPT_FORCE));
+    getScmClient()
+        .deleteContainer(container.getContainerInfo().getContainerID(),
+            container.getPipeline(), cmd.hasOption(OPT_FORCE));
     logOut("Container %s deleted.", containerID);
   }

@@ -24,7 +24,6 @@
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerData;
@@ -33,6 +32,7 @@

 import java.io.IOException;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -68,13 +68,12 @@ public void execute(CommandLine cmd) throws IOException {
       }
     }
     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
-    ContainerInfo container = getScmClient().
-        getContainer(Long.parseLong(containerID));
+    ContainerWithPipeline container = getScmClient().
+        getContainerWithPipeline(Long.parseLong(containerID));
     Preconditions.checkNotNull(container, "Container cannot be null");

-    ContainerData containerData =
-        getScmClient().readContainer(container.getContainerID(),
-            container.getPipeline());
+    ContainerData containerData = getScmClient().readContainer(container
+        .getContainerInfo().getContainerID(), container.getPipeline());

     // Print container report info.
     logOut("Container id: %s", containerID);
@@ -21,8 +21,8 @@
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -271,17 +271,17 @@ public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo,
       KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
       BlockID blockID = ksmKeyLocationInfo.getBlockID();
       long containerID = blockID.getContainerID();
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(containerID);
-      XceiverClientSpi xceiverClient =
-          xceiverClientManager.acquireClient(container.getPipeline(), containerID);
+      ContainerWithPipeline containerWithPipeline =
+          storageContainerLocationClient.getContainerWithPipeline(containerID);
+      XceiverClientSpi xceiverClient = xceiverClientManager
+          .acquireClient(containerWithPipeline.getPipeline(), containerID);
       boolean success = false;
       containerKey = ksmKeyLocationInfo.getLocalID();
       try {
         LOG.debug("get key accessing {} {}",
             containerID, containerKey);
         groupInputStream.streamOffset[i] = length;
         ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
             .containerKeyDataForRead(blockID);
         ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
             .getKey(xceiverClient, containerKeyData, requestId);
@@ -292,7 +292,8 @@ public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo,
         }
         success = true;
         ChunkInputStream inputStream = new ChunkInputStream(
-            ksmKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
+            ksmKeyLocationInfo.getBlockID(), xceiverClientManager,
+            xceiverClient,
             chunks, requestId);
         groupInputStream.addStream(inputStream,
             ksmKeyLocationInfo.getLength());
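On the read path, the client now fetches the pipeline together with the container record in a single getContainerWithPipeline call and hands that pipeline to the XceiverClientManager. Condensed from the hunk above, using the same identifiers as the surrounding code:

    long containerID = blockID.getContainerID();
    ContainerWithPipeline containerWithPipeline =
        storageContainerLocationClient.getContainerWithPipeline(containerID);
    // The pipeline now travels alongside, not inside, the container metadata.
    XceiverClientSpi xceiverClient = xceiverClientManager
        .acquireClient(containerWithPipeline.getPipeline(), containerID);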
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -163,10 +164,12 @@ public void addPreallocateBlocks(KsmKeyLocationInfoGroup version,

   private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
       throws IOException {
-    ContainerInfo container = scmClient.getContainer(
-        subKeyInfo.getContainerID());
+    ContainerWithPipeline containerWithPipeline = scmClient
+        .getContainerWithPipeline(subKeyInfo.getContainerID());
+    ContainerInfo container = containerWithPipeline.getContainerInfo();

     XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(container.getPipeline(),
+        xceiverClientManager.acquireClient(containerWithPipeline.getPipeline(),
             container.getContainerID());
     // create container if needed
     if (subKeyInfo.getShouldCreateContainer()) {
|
@ -0,0 +1,30 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.ozone.protocolPB;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Helper class for converting protobuf objects.
|
||||||
|
*/
|
||||||
|
public final class OzonePBHelper {
|
||||||
|
|
||||||
|
private OzonePBHelper() {
|
||||||
|
/** Hidden constructor */
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
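The new OzonePBHelper lands empty apart from a hidden constructor; presumably it reserves a home for Ozone-side protobuf conversion helpers, mirroring the PBHelper pattern used elsewhere in Hadoop, to be filled in by follow-up changes.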
@@ -18,6 +18,7 @@

 import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -30,7 +31,6 @@
 import org.junit.Test;

 import java.io.IOException;
-import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
@@ -71,31 +71,35 @@ public void cleanUp() {
   @Test
   public void testAllocateContainer() throws IOException {
     // Allocate a container and verify the container info
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+        info.getContainerID());
     Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
     Assert.assertEquals(containerOwner, info.getOwner());
     Assert.assertEquals(xceiverClientManager.getType(),
-        info.getPipeline().getType());
+        info.getReplicationType());
     Assert.assertEquals(xceiverClientManager.getFactor(),
-        info.getPipeline().getFactor());
+        info.getReplicationFactor());
     Assert.assertEquals(HddsProtos.LifeCycleState.ALLOCATED, info.getState());

     // Check there are two containers in ALLOCATED state after allocation
-    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container2 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     int numContainers = containerStateManager
         .getMatchingContainerIDs(containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED).size();
-    Assert.assertNotEquals(container1.getContainerID(), container2.getContainerID());
+    Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
+        container2.getContainerInfo().getContainerID());
     Assert.assertEquals(2, numContainers);
   }

@@ -105,14 +109,15 @@ public void testContainerStateManagerRestart() throws IOException {

     List<ContainerInfo> containers = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
-      ContainerInfo container = scm.getClientProtocolServer().allocateContainer(
-          xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), containerOwner);
-      containers.add(container);
+      ContainerWithPipeline container = scm.getClientProtocolServer()
+          .allocateContainer(
+              xceiverClientManager.getType(),
+              xceiverClientManager.getFactor(), containerOwner);
+      containers.add(container.getContainerInfo());
       if (i >= 5) {
-        scm.getScmContainerManager()
-            .updateContainerState(container.getContainerID(),
+        scm.getScmContainerManager().updateContainerState(container
+            .getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CREATE);
       }
     }

@@ -134,34 +139,40 @@ public void testContainerStateManagerRestart() throws IOException {

   @Test
   public void testGetMatchingContainer() throws IOException {
-    ContainerInfo container1 = scm.getClientProtocolServer().
+    ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(xceiverClientManager.getType(),
             xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);

-    ContainerInfo container2 = scm.getClientProtocolServer().
+    ContainerWithPipeline container2 = scm.getClientProtocolServer().
         allocateContainer(xceiverClientManager.getType(),
             xceiverClientManager.getFactor(), containerOwner);

     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+        info.getContainerID());

     info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+        info.getContainerID());

-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);

     // space has already been allocated in container1, now container 2 should
     // be chosen.
@@ -169,7 +180,8 @@ public void testGetMatchingContainer() throws IOException {
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+        info.getContainerID());
   }

   @Test
@@ -183,30 +195,33 @@ public void testUpdateContainerState() throws IOException {

     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.ALLOCATED).size();
     Assert.assertEquals(1, containers);

-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CREATING).size();
     Assert.assertEquals(1, containers);

-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.OPEN).size();
     Assert.assertEquals(1, containers);

     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.FINALIZE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -214,7 +229,7 @@ public void testUpdateContainerState() throws IOException {
     Assert.assertEquals(1, containers);

     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -222,7 +237,7 @@ public void testUpdateContainerState() throws IOException {
     Assert.assertEquals(1, containers);

     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.DELETE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -230,7 +245,7 @@ public void testUpdateContainerState() throws IOException {
     Assert.assertEquals(1, containers);

     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLEANUP);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -239,13 +254,15 @@ public void testUpdateContainerState() throws IOException {

     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // DELETING
-    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    ContainerWithPipeline container2 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     scmContainerMapping
-        .updateContainerState(container2.getContainerID(),
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.TIMEOUT);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -254,17 +271,21 @@ public void testUpdateContainerState() throws IOException {

     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED
-    ContainerInfo container3 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.FINALIZE);
+    ContainerWithPipeline container3 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     scmContainerMapping
-        .updateContainerState(container3.getContainerID(),
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.FINALIZE);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -274,12 +295,14 @@ public void testUpdateContainerState() throws IOException {

   @Test
   public void testUpdatingAllocatedBytes() throws Exception {
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
+    scmContainerMapping.updateContainerState(container1
+        .getContainerInfo().getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
+    scmContainerMapping.updateContainerState(container1
+        .getContainerInfo().getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);

     Random ran = new Random();
@@ -292,18 +315,18 @@ public void testUpdatingAllocatedBytes() throws Exception {
         .getMatchingContainer(size, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+        info.getContainerID());

     ContainerMapping containerMapping =
-        (ContainerMapping)scmContainerMapping;
+        (ContainerMapping) scmContainerMapping;
     // manually trigger a flush, this will persist the allocated bytes value
     // to disk
     containerMapping.flushContainerInfo();

     // the persisted value should always be equal to allocated size.
-    byte[] containerBytes =
-        containerMapping.getContainerStore().get(
-            Longs.toByteArray(container1.getContainerID()));
+    byte[] containerBytes = containerMapping.getContainerStore().get(
+        Longs.toByteArray(container1.getContainerInfo().getContainerID()));
     HddsProtos.SCMContainerInfo infoProto =
         HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
     ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto);
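Note the assertion changes above: with the Pipeline object removed from ContainerInfo, replication settings are read from the container record itself rather than dereferenced through an embedded pipeline. A sketch of the before/after accessors (illustrative, reusing the info variable from the test):

    // Before: replication settings lived on the embedded pipeline.
    //   info.getPipeline().getType();
    //   info.getPipeline().getFactor();
    // After: ContainerInfo carries them directly.
    HddsProtos.ReplicationType type = info.getReplicationType();
    HddsProtos.ReplicationFactor factor = info.getReplicationFactor();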
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone;

 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -28,7 +28,6 @@
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.junit.AfterClass;
@@ -78,12 +77,12 @@ public static void cleanup() throws Exception {
    */
   @Test
   public void testCreate() throws Exception {
-    ContainerInfo container = storageClient.createContainer(HddsProtos
+    ContainerWithPipeline container = storageClient.createContainer(HddsProtos
         .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
         .ONE, "OZONE");
-    assertEquals(container.getContainerID(),
-        storageClient.getContainer(container.getContainerID()).
-            getContainerID());
+    assertEquals(container.getContainerInfo().getContainerID(), storageClient
+        .getContainer(container.getContainerInfo().getContainerID())
+        .getContainerID());
   }

 }
@@ -22,7 +22,7 @@

 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -131,7 +131,7 @@ private void testRpcPermissionWithConf(
     }

     try {
-      ContainerInfo container2 = mockClientServer
+      ContainerWithPipeline container2 = mockClientServer
           .allocateContainer(xceiverClientManager.getType(),
               HddsProtos.ReplicationFactor.ONE, "OZONE");
       if (expectPermissionDenied) {
@@ -144,7 +144,7 @@ private void testRpcPermissionWithConf(
     }

     try {
-      ContainerInfo container3 = mockClientServer
+      ContainerWithPipeline container3 = mockClientServer
           .allocateContainer(xceiverClientManager.getType(),
               HddsProtos.ReplicationFactor.ONE, "OZONE");
       if (expectPermissionDenied) {
@@ -23,7 +23,7 @@
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -158,9 +158,11 @@ public List<Long> getAllBlocks(Long containeID) throws IOException {

   private MetadataStore getContainerMetadata(Long containerID)
       throws IOException {
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getClientProtocolServer().getContainer(containerID);
-    DatanodeDetails leadDN = container.getPipeline().getLeader();
+    ContainerWithPipeline containerWithPipeline = cluster
+        .getStorageContainerManager().getClientProtocolServer()
+        .getContainerWithPipeline(containerID);
+
+    DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
     ContainerData containerData = containerServer.getContainerManager()
@@ -390,8 +390,8 @@ private boolean verifyRatisReplication(String volumeName, String bucketName,
         keyInfo.getLatestVersionLocations().getLocationList()) {
       ContainerInfo container =
           storageContainerLocationClient.getContainer(info.getContainerID());
-      if ((container.getPipeline().getFactor() != replicationFactor) ||
-          (container.getPipeline().getType() != replicationType)) {
+      if (!container.getReplicationFactor().equals(replicationFactor) || (
+          container.getReplicationType() != replicationType)) {
         return false;
       }
     }
@ -23,8 +23,6 @@
|
|||||||
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
|
||||||
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
|
||||||
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
|
||||||
import org.apache.hadoop.hdds.scm.container.ContainerID;
|
|
||||||
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@ -35,7 +33,6 @@
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@ -112,9 +109,9 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
         .get(0).getBlocksLatestVersionOnly().get(0);

     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 1);

     DatanodeDetails datanodeDetails = datanodes.get(0);
@ -167,9 +164,9 @@ public void testCloseContainerViaStandaAlone()
         .get(0).getBlocksLatestVersionOnly().get(0);

     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 1);

     DatanodeDetails datanodeDetails = datanodes.get(0);
@ -220,9 +217,9 @@ public void testCloseContainerViaRatis() throws IOException,
         .get(0).getBlocksLatestVersionOnly().get(0);

     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 3);

     GenericTestUtils.LogCapturer logCapturer =
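
Note: the close-container tests no longer read the pipeline out of a ContainerInfo returned by SCM; they ask the SCM container manager for a ContainerWithPipeline and take the placement from its Pipeline half. A minimal sketch of the new lookup, assuming a running MiniOzoneCluster bound to a `cluster` field and a known `containerID` (variable names are illustrative, not from the patch):

    import java.util.List;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

    // Resolve the datanodes hosting a container after HDDS-175: the pipeline
    // now travels next to, rather than inside, the ContainerInfo.
    ContainerWithPipeline cwp = cluster.getStorageContainerManager()
        .getScmContainerManager().getContainerWithPipeline(containerID);
    List<DatanodeDetails> datanodes = cwp.getPipeline().getMachines();
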
@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@ -32,7 +33,6 @@
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.ozone.scm;

-import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@ -68,7 +66,7 @@ public static void shutdown() throws InterruptedException {

   @Test
   public void testAllocate() throws Exception {
-    ContainerInfo container = storageContainerLocationClient.allocateContainer(
+    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
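
Note: allocateContainer(...) on the SCM location protocol now returns a ContainerWithPipeline pair instead of a pipeline-bearing ContainerInfo. A minimal sketch of the new call shape, assuming initialized storageContainerLocationClient and xceiverClientManager fields as in this test (the owner string is illustrative):

    ContainerWithPipeline cwp = storageContainerLocationClient.allocateContainer(
        xceiverClientManager.getType(),
        xceiverClientManager.getFactor(),
        "OZONE");                                       // illustrative owner
    long id = cwp.getContainerInfo().getContainerID();  // metadata half
    Pipeline pipeline = cwp.getPipeline();              // placement half
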
@ -19,7 +19,7 @@

 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -81,17 +81,18 @@ public static void shutdown() throws InterruptedException {
   @Test
   public void testAllocateWrite() throws Exception {
     String traceID = UUID.randomUUID().toString();
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);

     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.writeSmallFile(client, blockID,
         "data123".getBytes(), traceID);
     ContainerProtos.GetSmallFileResponseProto response =
@ -104,20 +105,21 @@ public void testAllocateWrite() throws Exception {
   @Test
   public void testInvalidKeyRead() throws Exception {
     String traceID = UUID.randomUUID().toString();
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);

     thrown.expect(StorageContainerException.class);
     thrown.expectMessage("Unable to find the key");

     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     // Try to read a Key Container Name
     ContainerProtos.GetSmallFileResponseProto response =
         ContainerProtocolCalls.readSmallFile(client, blockID, traceID);
@ -128,20 +130,20 @@ public void testInvalidKeyRead() throws Exception {
   public void testInvalidContainerRead() throws Exception {
     String traceID = UUID.randomUUID().toString();
     long nonExistContainerID = 8888L;
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.
-        acquireClient(container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);
     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.writeSmallFile(client, blockID,
         "data123".getBytes(), traceID);


     thrown.expect(StorageContainerException.class);
     thrown.expectMessage("Unable to find the container");

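
Note: the recurring mechanical change in these tests is the client-acquisition idiom. Callers previously read both the pipeline and the ID off a single ContainerInfo; now the ID comes from the nested ContainerInfo while the Pipeline is its sibling. A sketch of the new pattern, assuming an allocated ContainerWithPipeline named `container` and an xceiverClientManager field; the acquire/release pairing mirrors the createContainer client in this patch:

    XceiverClientSpi client = xceiverClientManager.acquireClient(
        container.getPipeline(),
        container.getContainerInfo().getContainerID());
    try {
      ContainerProtocolCalls.createContainer(client,
          container.getContainerInfo().getContainerID(), traceID);
    } finally {
      xceiverClientManager.releaseClient(client);
    }
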
@ -19,6 +19,7 @@

 import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@ -136,7 +137,7 @@ public void testCreateContainer() throws Exception {
   private boolean containerExist(long containerID) {
     try {
       ContainerInfo container = scm.getClientProtocolServer()
-          .getContainer(containerID);
+          .getContainerWithPipeline(containerID).getContainerInfo();
       return container != null
           && containerID == container.getContainerID();
     } catch (IOException e) {
@ -157,31 +158,34 @@ public void testDeleteContainer() throws Exception {
     // 1. Test to delete a non-empty container.
     // ****************************************
     // Create an non-empty container
-    ContainerInfo container = containerOperationClient
+    ContainerWithPipeline container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
            HddsProtos.ReplicationFactor.ONE, containerOwner);

     ContainerData cdata = ContainerData
         .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
-    KeyUtils.getDB(cdata, conf).put(Longs.toByteArray(container.getContainerID()),
-        "someKey".getBytes());
-    Assert.assertTrue(containerExist(container.getContainerID()));
+            container.getContainerInfo().getContainerID()), conf);
+    KeyUtils.getDB(cdata, conf)
+        .put(Longs.toByteArray(container.getContainerInfo().getContainerID()),
+            "someKey".getBytes());
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));

     // Gracefully delete a container should fail because it is open.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(container.getContainerID())};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     testErr = new ByteArrayOutputStream();
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
     assertEquals(EXECUTION_ERROR, exitCode);
     assertTrue(testErr.toString()
         .contains("Deleting an open container is not allowed."));
-    Assert.assertTrue(containerExist(container.getContainerID()));
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));

     // Close the container
     containerOperationClient.closeContainer(
-        container.getContainerID(), container.getPipeline());
+        container.getContainerInfo().getContainerID());

     // Gracefully delete a container should fail because it is not empty.
     testErr = new ByteArrayOutputStream();
@ -189,45 +193,49 @@ public void testDeleteContainer() throws Exception {
     assertEquals(EXECUTION_ERROR, exitCode2);
     assertTrue(testErr.toString()
         .contains("Container cannot be deleted because it is not empty."));
-    Assert.assertTrue(containerExist(container.getContainerID()));
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));

     // Try force delete again.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(container.getContainerID()), "-f"};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(container.getContainerInfo().getContainerID()), "-f"};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals("Expected success, found:", ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(container.getContainerID()));
+    assertFalse(containerExist(container.getContainerInfo().getContainerID()));

     // ****************************************
     // 2. Test to delete an empty container.
     // ****************************************
     // Create an empty container
-    ContainerInfo emptyContainer = containerOperationClient
+    ContainerWithPipeline emptyContainer = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    containerOperationClient.closeContainer(emptyContainer.getContainerID(),
-        container.getPipeline());
-    Assert.assertTrue(containerExist(emptyContainer.getContainerID()));
+    containerOperationClient
+        .closeContainer(emptyContainer.getContainerInfo().getContainerID());
+    Assert.assertTrue(
+        containerExist(emptyContainer.getContainerInfo().getContainerID()));

     // Successfully delete an empty container.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(emptyContainer.getContainerID())};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(emptyContainer.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(emptyContainer.getContainerID()));
+    assertFalse(
+        containerExist(emptyContainer.getContainerInfo().getContainerID()));

     // After the container is deleted,
     // another container can now be recreated.
-    ContainerInfo newContainer = containerOperationClient.
+    ContainerWithPipeline newContainer = containerOperationClient.
         createContainer(xceiverClientManager.getType(),
            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    Assert.assertTrue(containerExist(newContainer.getContainerID()));
+    Assert.assertTrue(
+        containerExist(newContainer.getContainerInfo().getContainerID()));

     // ****************************************
     // 3. Test to delete a non-exist container.
     // ****************************************
     long nonExistContainerID = ContainerTestHelper.getTestContainerID();
-    delCmd = new String[] {"-container", "-delete", "-c",
+    delCmd = new String[]{"-container", "-delete", "-c",
         Long.toString(nonExistContainerID)};
     testErr = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
@ -250,45 +258,33 @@ public void testInfoContainer() throws Exception {
         "LeaderID: %s\n" +
         "Datanodes: [%s]\n";

-    String formatStrWithHash =
-        "Container id: %s\n" +
-        "Container State: %s\n" +
-        "Container Hash: %s\n" +
-        "Container DB Path: %s\n" +
-        "Container Path: %s\n" +
-        "Container Metadata: {%s}\n" +
-        "LeaderID: %s\n" +
-        "Datanodes: [%s]\n";
-
     // Test a non-exist container
     String containerID =
         Long.toString(ContainerTestHelper.getTestContainerID());
-    String[] info = { "-container", "-info", containerID };
+    String[] info = {"-container", "-info", containerID};
     int exitCode = runCommandAndGetOutput(info, null, null);
     assertEquals("Expected Execution Error, Did not find that.",
         EXECUTION_ERROR, exitCode);

     // Create an empty container.
-    ContainerInfo container = containerOperationClient
+    ContainerWithPipeline container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
            HddsProtos.ReplicationFactor.ONE, containerOwner);
-    ContainerData data = ContainerData
-        .getFromProtBuf(containerOperationClient.
-            readContainer(container.getContainerID(),
-                container.getPipeline()), conf);
+    ContainerData data = ContainerData.getFromProtBuf(containerOperationClient
+        .readContainer(container.getContainerInfo().getContainerID()), conf);

-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals("Expected Success, did not find it.", ResultCode.SUCCESS,
         exitCode);

     String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    String expected =
-        String.format(formatStr, container.getContainerID(), openStatus,
-            data.getDBPath(), data.getContainerPath(), "",
-            datanodeDetails.getHostName(), datanodeDetails.getHostName());
+    String expected = String.format(formatStr, container.getContainerInfo()
+        .getContainerID(), openStatus, data.getDBPath(),
+        data.getContainerPath(), "", datanodeDetails.getHostName(),
+        datanodeDetails.getHostName());
     assertEquals(expected, out.toString());

     out.reset();
@ -299,40 +295,39 @@ public void testInfoContainer() throws Exception {
            HddsProtos.ReplicationFactor.ONE, containerOwner);
     data = ContainerData
         .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
+            container.getContainerInfo().getContainerID()), conf);
     KeyUtils.getDB(data, conf)
         .put(containerID.getBytes(), "someKey".getBytes());

-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);

     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    expected = String.format(formatStr, container.getContainerID(), openStatus,
-        data.getDBPath(), data.getContainerPath(), "",
-        datanodeDetails.getHostName(), datanodeDetails.getHostName());
+    expected = String.format(formatStr, container.getContainerInfo().
+        getContainerID(), openStatus, data.getDBPath(),
+        data.getContainerPath(), "", datanodeDetails.getHostName(),
+        datanodeDetails.getHostName());
     assertEquals(expected, out.toString());

     out.reset();


     // Close last container and test info again.
-    containerOperationClient.closeContainer(
-        container.getContainerID(), container.getPipeline());
+    containerOperationClient
+        .closeContainer(container.getContainerInfo().getContainerID());

-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    data = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
+    data = ContainerData.getFromProtBuf(containerOperationClient
+        .readContainer(container.getContainerInfo().getContainerID()), conf);

     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String
-        .format(formatStr, container.getContainerID(), openStatus,
-            data.getDBPath(), data.getContainerPath(), "",
-            datanodeDetails.getHostName(), datanodeDetails.getHostName());
+        .format(formatStr, container.getContainerInfo().getContainerID(),
+            openStatus, data.getDBPath(), data.getContainerPath(), "",
+            datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
   }
@ -360,10 +355,10 @@ public void testListContainerCommand() throws Exception {
     // Create 20 containers for testing.
     List<ContainerInfo> containers = new ArrayList<>();
     for (int index = 0; index < 20; index++) {
-      ContainerInfo container = containerOperationClient.createContainer(
+      ContainerWithPipeline container = containerOperationClient.createContainer(
          xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
          containerOwner);
-      containers.add(container);
+      containers.add(container.getContainerInfo());
     }

     ByteArrayOutputStream out = new ByteArrayOutputStream();
@ -417,11 +412,11 @@ public void testListContainerCommand() throws Exception {

   @Test
   public void testCloseContainer() throws Exception {
-    long containerID = containerOperationClient
-        .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner).getContainerID();
+    long containerID = containerOperationClient.createContainer(
+        xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
+        containerOwner).getContainerInfo().getContainerID();
     ContainerInfo container = scm.getClientProtocolServer()
-        .getContainer(containerID);
+        .getContainerWithPipeline(containerID).getContainerInfo();
     assertNotNull(container);
     assertEquals(containerID, container.getContainerID());

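
Note: on the SCM client protocol server, getContainer(id) gives way to getContainerWithPipeline(id); callers that only need metadata unwrap it immediately. A sketch of the containerExist-style check used above, written as a standalone helper for clarity (passing scm as a parameter is an illustrative variation; the test keeps it as a field):

    private boolean containerExist(StorageContainerManager scm, long containerID) {
      try {
        // Unwrap the metadata half; the pipeline half is ignored here.
        ContainerInfo container = scm.getClientProtocolServer()
            .getContainerWithPipeline(containerID).getContainerInfo();
        return container != null && containerID == container.getContainerID();
      } catch (IOException e) {
        return false;   // lookup failure is treated as "does not exist"
      }
    }
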
@ -20,7 +20,7 @@
 import com.google.common.cache.Cache;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -98,22 +98,25 @@ public void testCaching() throws IOException {
         shouldUseGrpc);
     XceiverClientManager clientManager = new XceiverClientManager(conf);

-    ContainerInfo container1 = storageContainerLocationClient
+    ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
            containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());

-    ContainerInfo container2 = storageContainerLocationClient
+    ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
            containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());

-    XceiverClientSpi client3 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client3 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(2, client3.getRefcount());
     Assert.assertEquals(2, client1.getRefcount());
     Assert.assertEquals(client1, client3);
@ -132,32 +135,35 @@ public void testFreeByReference() throws IOException {
     Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();

-    ContainerInfo container1 =
+    ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(), HddsProtos.ReplicationFactor.ONE,
             containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
     Assert.assertEquals(container1.getPipeline(),
         client1.getPipeline());

-    ContainerInfo container2 =
+    ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
     Assert.assertNotEquals(client1, client2);

     // least recent container (i.e containerName1) is evicted
-    XceiverClientSpi nonExistent1 = cache.getIfPresent(container1.getContainerID());
+    XceiverClientSpi nonExistent1 = cache
+        .getIfPresent(container1.getContainerInfo().getContainerID());
     Assert.assertEquals(null, nonExistent1);
     // However container call should succeed because of refcount on the client.
     String traceID1 = "trace" + RandomStringUtils.randomNumeric(4);
     ContainerProtocolCalls.createContainer(client1,
-        container1.getContainerID(), traceID1);
+        container1.getContainerInfo().getContainerID(), traceID1);

     // After releasing the client, this connection should be closed
     // and any container operations should fail
@ -166,7 +172,7 @@ public void testFreeByReference() throws IOException {
     String expectedMessage = "This channel is not connected.";
     try {
       ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerID(), traceID1);
+          container1.getContainerInfo().getContainerID(), traceID1);
       Assert.fail("Create container should throw exception on closed"
           + "client");
     } catch (Exception e) {
@ -186,28 +192,30 @@ public void testFreeByEviction() throws IOException {
     Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();

-    ContainerInfo container1 =
+    ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
             clientManager.getFactor(), containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());

     clientManager.releaseClient(client1);
     Assert.assertEquals(0, client1.getRefcount());

-    ContainerInfo container2 = storageContainerLocationClient
+    ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
            containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
     Assert.assertNotEquals(client1, client2);


     // now client 1 should be evicted
-    XceiverClientSpi nonExistent = cache.getIfPresent(container1.getContainerID());
+    XceiverClientSpi nonExistent = cache
+        .getIfPresent(container1.getContainerInfo().getContainerID());
     Assert.assertEquals(null, nonExistent);

     // Any container operation should now fail
@ -215,7 +223,7 @@ public void testFreeByEviction() throws IOException {
     String expectedMessage = "This channel is not connected.";
     try {
       ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerID(), traceID2);
+          container1.getContainerInfo().getContainerID(), traceID2);
       Assert.fail("Create container should throw exception on closed"
           + "client");
     } catch (Exception e) {
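
Note: XceiverClientManager's connection cache is keyed by container ID (Cache<Long, XceiverClientSpi>), which is why the eviction assertions above now derive the key from the nested ContainerInfo. A sketch of the lookup, assuming a clientManager field and an allocated ContainerWithPipeline named container1 as in these tests:

    Cache<Long, XceiverClientSpi> cache = clientManager.getClientCache();
    XceiverClientSpi cached = cache.getIfPresent(
        container1.getContainerInfo().getContainerID());
    // A null result means the entry was evicted from the cache; a client that
    // was already acquired keeps working until its refcount drops to zero.
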
@ -32,7 +32,7 @@
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@ -79,14 +79,16 @@ public void testMetrics() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);

-    ContainerInfo container = storageContainerLocationClient
+    ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
            containerOwner);
-    XceiverClientSpi client = clientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = clientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());

     ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(container.getContainerID(),
+        .getCreateContainerRequest(
+            container.getContainerInfo().getContainerID(),
             container.getPipeline());
     client.sendCommand(request);

@ -112,7 +114,7 @@ public void testMetrics() throws Exception {
     // use async interface for testing pending metrics
     for (int i = 0; i < numRequest; i++) {
       BlockID blockID = ContainerTestHelper.
-          getTestBlockID(container.getContainerID());
+          getTestBlockID(container.getContainerInfo().getContainerID());
       ContainerProtos.ContainerCommandRequestProto smallFileRequest;

       smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(
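
Note: building a create-container request now supplies the ID and the pipeline as separate arguments, both pulled from the same ContainerWithPipeline. A sketch, assuming the test's container and client variables and the ContainerTestHelper used above:

    ContainerCommandRequestProto request = ContainerTestHelper
        .getCreateContainerRequest(
            container.getContainerInfo().getContainerID(),  // metadata half
            container.getPipeline());                       // placement half
    client.sendCommand(request);
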
@ -60,7 +60,9 @@ public void initialize() throws IOException {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(CLOSED)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@ -81,7 +83,9 @@ public void initialize() throws IOException {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@ -101,7 +105,9 @@ public void initialize() throws IOException {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@ -166,7 +172,9 @@ public void createContainerBenchMark(BenchMarkContainerStateMap state,
     int cid = state.containerID.incrementAndGet();
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(CLOSED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor())
         // This is bytes allocated for blocks inside container, not the
         // container size
         .setAllocatedBytes(0)
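
Note: ContainerInfo.Builder no longer accepts a Pipeline; callers copy the three fields that survive the refactor off the pipeline instead. A sketch of the new builder shape, assuming a pipeline variable and the lifecycle-state constant from the surrounding benchmark; the remaining setters in the chain (allocated bytes and the fields elided by the diff above) are abbreviated:

    ContainerInfo containerInfo = new ContainerInfo.Builder()
        .setState(CLOSED)
        .setPipelineName(pipeline.getPipelineName())   // name only, not the object
        .setReplicationType(pipeline.getType())
        .setReplicationFactor(pipeline.getFactor())
        .setAllocatedBytes(0)   // bytes allocated for blocks, not container size
        // ... remaining setters as in the benchmark above ...
        .build();
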
@ -19,6 +19,7 @@

 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@ -37,7 +38,6 @@
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@ -86,12 +86,12 @@ public class SQLCLI extends Configured implements Tool {
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
           "containerID LONG PRIMARY KEY NOT NULL, " +
-          "leaderUUID TEXT NOT NULL)";
-  private static final String CREATE_CONTAINER_MEMBERS =
-      "CREATE TABLE containerMembers (" +
-          "containerName TEXT NOT NULL, " +
-          "datanodeUUID TEXT NOT NULL," +
-          "PRIMARY KEY(containerName, datanodeUUID));";
+          "replicationType TEXT NOT NULL," +
+          "replicationFactor TEXT NOT NULL," +
+          "usedBytes LONG NOT NULL," +
+          "allocatedBytes LONG NOT NULL," +
+          "owner TEXT," +
+          "numberOfKeys LONG)";
   private static final String CREATE_DATANODE_INFO =
       "CREATE TABLE datanodeInfo (" +
           "hostName TEXT NOT NULL, " +
@ -99,8 +99,10 @@ public class SQLCLI extends Configured implements Tool {
           "ipAddress TEXT, " +
           "containerPort INTEGER NOT NULL);";
   private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerID, leaderUUID) " +
-          "VALUES (\"%d\", \"%s\")";
+      "INSERT INTO containerInfo (containerID, replicationType, "
+          + "replicationFactor, usedBytes, allocatedBytes, owner, "
+          + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", \"%d\", "
+          + "\"%s\", \"%d\")";
   private static final String INSERT_DATANODE_INFO =
       "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
          "containerPort) " +
@ -469,10 +471,7 @@ private void convertContainerDB(Path dbPath, Path outPath)
         .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
-      executeSQL(conn, CREATE_CONTAINER_MEMBERS);
-      executeSQL(conn, CREATE_DATANODE_INFO);

-      HashSet<String> uuidChecked = new HashSet<>();
       dbStore.iterate(null, (key, value) -> {
         long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
@ -481,8 +480,7 @@ private void convertContainerDB(Path dbPath, Path outPath)
         Preconditions.checkNotNull(containerInfo);
         try {
           //TODO: include container state to sqllite schema
-          insertContainerDB(conn, containerID,
-              containerInfo.getPipeline().getProtobufMessage(), uuidChecked);
+          insertContainerDB(conn, containerInfo, containerID);
           return true;
         } catch (SQLException e) {
           throw new IOException(e);
@ -494,38 +492,23 @@ private void convertContainerDB(Path dbPath, Path outPath)
   /**
    * Insert into the sqlite DB of container.db.
    * @param conn the connection to the sqlite DB.
-   * @param containerID the id of the container.
-   * @param pipeline the actual container pipeline object.
-   * @param uuidChecked the uuid that has been already inserted.
+   * @param containerInfo
+   * @param containerID
    * @throws SQLException throws exception.
    */
-  private void insertContainerDB(Connection conn, long containerID,
-      Pipeline pipeline, Set<String> uuidChecked) throws SQLException {
+  private void insertContainerDB(Connection conn, ContainerInfo containerInfo,
+      long containerID) throws SQLException {
     LOG.info("Insert to sql container db, for container {}", containerID);
     String insertContainerInfo = String.format(
         INSERT_CONTAINER_INFO, containerID,
-        pipeline.getLeaderID());
-    executeSQL(conn, insertContainerInfo);
+        containerInfo.getReplicationType(),
+        containerInfo.getReplicationFactor(),
+        containerInfo.getUsedBytes(),
+        containerInfo.getAllocatedBytes(),
+        containerInfo.getOwner(),
+        containerInfo.getNumberOfKeys());

-    for (HddsProtos.DatanodeDetailsProto dd :
-        pipeline.getMembersList()) {
-      String uuid = dd.getUuid();
-      if (!uuidChecked.contains(uuid)) {
-        // we may also not use this checked set, but catch exception instead
-        // but this seems a bit cleaner.
-        String ipAddr = dd.getIpAddress();
-        String hostName = dd.getHostName();
-        int containerPort = DatanodeDetails.getFromProtoBuf(dd)
-            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-        String insertMachineInfo = String.format(
-            INSERT_DATANODE_INFO, hostName, uuid, ipAddr, containerPort);
-        executeSQL(conn, insertMachineInfo);
-        uuidChecked.add(uuid);
-      }
-      String insertContainerMembers = String.format(
-          INSERT_CONTAINER_MEMBERS, containerID, uuid);
-      executeSQL(conn, insertContainerMembers);
-    }
+    executeSQL(conn, insertContainerInfo);
     LOG.info("Insertion completed.");
   }
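
Note: with the pipeline gone from ContainerInfo, the SQLCLI export drops the leaderUUID column and the containerMembers table and widens containerInfo to the replication and usage fields. A sketch of the rewritten insert path, using only the constants and getters visible in the patch (conn and containerInfo are assumed from the surrounding method):

    // Format one containerInfo row from the refactored ContainerInfo fields
    // and write it through the existing executeSQL helper.
    String insertContainerInfo = String.format(
        INSERT_CONTAINER_INFO, containerID,
        containerInfo.getReplicationType(),
        containerInfo.getReplicationFactor(),
        containerInfo.getUsedBytes(),
        containerInfo.getAllocatedBytes(),
        containerInfo.getOwner(),
        containerInfo.getNumberOfKeys());
    executeSQL(conn, insertContainerInfo);
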