HDDS-1817. GetKey fails with IllegalArgumentException.

Signed-off-by: Anu Engineer <aengineer@apache.org>
Authored by Nanda kumar on 2019-07-25 00:42:24 +05:30; committed by Anu Engineer
parent 1d98a212cb
commit 2546e6ece2
3 changed files with 39 additions and 36 deletions
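Reading the three changes together: getContainerWithPipeline previously routed any container that was not strictly OPEN (including CLOSING ones) to a STAND_ALONE pipeline built from that container's replicas, and the heartbeat dispatcher never forwarded incremental container reports, so SCM's replica list could be empty; building a pipeline from an empty replica list is what appears to have surfaced as the IllegalArgumentException on GetKey. The fix keeps using the container's own pipeline for both OPEN and CLOSING containers, falls back to a standalone pipeline only when the original pipeline no longer exists, and wires incremental container reports into the dispatcher so replica information is available for that fallback.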

ContainerInfo.java

@@ -468,11 +468,4 @@ public boolean isOpen() {
         || state == HddsProtos.LifeCycleState.CLOSING;
   }
 
-  /**
-   * Check if a container is in Open state, but Close has not been initiated.
-   * @return true if Open, false otherwise.
-   */
-  public boolean isOpenNotClosing() {
-    return state == HddsProtos.LifeCycleState.OPEN;
-  }
 }
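For context, a minimal self-contained sketch of the state check this file keeps (hypothetical class; the enum mirrors the HddsProtos.LifeCycleState values visible above). The removed isOpenNotClosing() returned false for CLOSING containers, which is exactly the distinction the commit erases:

    final class ContainerStateSketch {
      // A subset of HddsProtos.LifeCycleState, for illustration only.
      enum LifeCycleState { OPEN, CLOSING, CLOSED }

      private final LifeCycleState state;

      ContainerStateSketch(LifeCycleState state) {
        this.state = state;
      }

      // Retained by the commit: OPEN and CLOSING both count as open.
      boolean isOpen() {
        return state == LifeCycleState.OPEN
            || state == LifeCycleState.CLOSING;
      }

      public static void main(String[] args) {
        // A CLOSING container is still "open" for pipeline lookup,
        // the case the removed isOpenNotClosing() used to exclude.
        System.out.println(
            new ContainerStateSketch(LifeCycleState.CLOSING).isOpen()); // true
      }
    }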

SCMClientProtocolServer.java

@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
@@ -68,6 +69,7 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -216,15 +218,14 @@ public ContainerInfo getContainer(long containerID) throws IOException {
   @Override
   public ContainerWithPipeline getContainerWithPipeline(long containerID)
       throws IOException {
-    Map<String, String> auditMap = Maps.newHashMap();
-    auditMap.put("containerID", String.valueOf(containerID));
-    boolean auditSuccess = true;
+    final ContainerID cid = ContainerID.valueof(containerID);
     try {
+      final ContainerInfo container = scm.getContainerManager()
+          .getContainer(cid);
+
       if (safeModePrecheck.isInSafeMode()) {
-        ContainerInfo contInfo = scm.getContainerManager()
-            .getContainer(ContainerID.valueof(containerID));
-        if (contInfo.isOpen()) {
-          if (!hasRequiredReplicas(contInfo)) {
+        if (container.isOpen()) {
+          if (!hasRequiredReplicas(container)) {
             throw new SCMException("Open container " + containerID + " doesn't"
                 + " have enough replicas to service this operation in "
                 + "Safe mode.", ResultCodes.SAFE_MODE_EXCEPTION);
@@ -233,40 +234,35 @@ public ContainerWithPipeline getContainerWithPipeline(long containerID)
       }
       getScm().checkAdminAccess(null);
-      final ContainerID id = ContainerID.valueof(containerID);
-      final ContainerInfo container = scm.getContainerManager().
-          getContainer(id);
-      final Pipeline pipeline;
+      Pipeline pipeline;
+      try {
+        pipeline = container.isOpen() ? scm.getPipelineManager()
+            .getPipeline(container.getPipelineID()) : null;
+      } catch (PipelineNotFoundException ex) {
+        // The pipeline is destroyed.
+        pipeline = null;
+      }
 
-      if (container.isOpenNotClosing()) {
-        // Ratis pipeline
-        pipeline = scm.getPipelineManager()
-            .getPipeline(container.getPipelineID());
-      } else {
+      if (pipeline == null) {
         pipeline = scm.getPipelineManager().createPipeline(
             HddsProtos.ReplicationType.STAND_ALONE,
             container.getReplicationFactor(),
             scm.getContainerManager()
-                .getContainerReplicas(id).stream()
+                .getContainerReplicas(cid).stream()
                 .map(ContainerReplica::getDatanodeDetails)
                 .collect(Collectors.toList()));
       }
 
+      AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+          SCMAction.GET_CONTAINER_WITH_PIPELINE,
+          Collections.singletonMap("containerID", cid.toString())));
+
       return new ContainerWithPipeline(container, pipeline);
     } catch (IOException ex) {
-      auditSuccess = false;
-      AUDIT.logReadFailure(
-          buildAuditMessageForFailure(SCMAction.GET_CONTAINER_WITH_PIPELINE,
-              auditMap, ex)
-      );
+      AUDIT.logReadFailure(buildAuditMessageForFailure(
+          SCMAction.GET_CONTAINER_WITH_PIPELINE,
+          Collections.singletonMap("containerID", cid.toString()), ex));
       throw ex;
-    } finally {
-      if(auditSuccess) {
-        AUDIT.logReadSuccess(
-            buildAuditMessageForSuccess(SCMAction.GET_CONTAINER_WITH_PIPELINE,
-                auditMap)
-        );
-      }
     }
   }
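The reshaped lookup reads as: use the container's own pipeline while the container is open; fall back to a STAND_ALONE pipeline built from its replicas when that pipeline is gone or the container is closed. A compact sketch of that control flow, using hypothetical stand-in types rather than the real Ozone classes:

    import java.util.List;
    import java.util.Optional;

    // Hypothetical stand-ins for the Ozone types in the diff above.
    record Pipeline(String type, List<String> nodes) {}

    final class PipelineResolverSketch {
      // Mimics the new logic: prefer the container's own pipeline while
      // the container is open; if the lookup comes back empty (standing
      // in for PipelineNotFoundException) or the container is closed,
      // build a STAND_ALONE pipeline from the known replica nodes.
      static Pipeline resolve(boolean containerIsOpen,
          Optional<Pipeline> existing, List<String> replicaNodes) {
        Pipeline pipeline = containerIsOpen ? existing.orElse(null) : null;
        if (pipeline == null) {
          pipeline = new Pipeline("STAND_ALONE", replicaNodes);
        }
        return pipeline;
      }

      public static void main(String[] args) {
        // Closed container: always served through a standalone pipeline.
        System.out.println(resolve(false, Optional.empty(), List.of("dn1")));
        // Open container whose Ratis pipeline was destroyed: same fallback.
        System.out.println(resolve(true, Optional.empty(), List.of("dn1", "dn2")));
      }
    }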

SCMDatanodeHeartbeatDispatcher.java

@@ -49,6 +49,8 @@
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_ACTIONS;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents
+    .INCREMENTAL_CONTAINER_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS;
@@ -121,6 +123,18 @@ public List<SCMCommand> dispatch(SCMHeartbeatRequestProto heartbeat) {
     }
 
+    final List<IncrementalContainerReportProto> icrs =
+        heartbeat.getIncrementalContainerReportList();
+    if (icrs.size() > 0) {
+      LOG.debug("Dispatching ICRs.");
+      for (IncrementalContainerReportProto icr : icrs) {
+        eventPublisher.fireEvent(INCREMENTAL_CONTAINER_REPORT,
+            new IncrementalContainerReportFromDatanode(
+                datanodeDetails, icr));
+      }
+    }
+
     if (heartbeat.hasContainerActions()) {
       LOG.debug("Dispatching Container Actions.");
       eventPublisher.fireEvent(
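The new block reuses the dispatcher's existing shape: read a report list off the heartbeat and fire one typed event per entry. A self-contained sketch of that pattern, with hypothetical names in place of the protobuf and event types:

    import java.util.List;
    import java.util.function.BiConsumer;

    // Hypothetical, minimal version of the dispatch loop above.
    final class IcrDispatchSketch {
      static <R> void dispatchEach(String eventName, List<R> reports,
          BiConsumer<String, R> publisher) {
        if (!reports.isEmpty()) {          // same guard as icrs.size() > 0
          for (R report : reports) {
            publisher.accept(eventName, report);
          }
        }
      }

      public static void main(String[] args) {
        dispatchEach("INCREMENTAL_CONTAINER_REPORT", List.of("icr-1", "icr-2"),
            (event, report) -> System.out.println(event + " <- " + report));
      }
    }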