HDDS-17. Add node to container map class to simplify state in SCM.

Contributed by Anu Engineer.
Anu Engineer 2018-05-12 09:57:42 -07:00
parent 413285569a
commit 1194ec31d7
9 changed files with 631 additions and 4 deletions

ContainerID.java (org.apache.hadoop.hdds.scm.container)

@@ -38,7 +38,7 @@ public class ContainerID implements Comparable {
*/
public ContainerID(long id) {
Preconditions.checkState(id > 0,
"Container ID should be a positive int");
"Container ID should be a positive long. "+ id);
this.id = id;
}

package-info.java (org.apache.hadoop.hdds.scm.container.states)

@@ -17,6 +17,6 @@
*/
/**
- * Container States management package.
+ * Container States package.
*/
package org.apache.hadoop.hdds.scm.container.states;

SCMException.java (org.apache.hadoop.hdds.scm.exceptions)

@@ -114,6 +114,8 @@ public enum ResultCodes {
FAILED_TO_FIND_BLOCK,
IO_EXCEPTION,
UNEXPECTED_CONTAINER_STATE,
-  SCM_NOT_INITIALIZED
+  SCM_NOT_INITIALIZED,
+  DUPLICATE_DATANODE,
+  NO_SUCH_DATANODE
}
}

Node2ContainerMap.java (org.apache.hadoop.hdds.scm.node.states, new file)

@@ -0,0 +1,184 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package org.apache.hadoop.hdds.scm.node.states;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
/**
* This data structure maintains the set of containers that are on a datanode.
* This information is built from the DN container reports.
*/
public class Node2ContainerMap {
private final Map<UUID, Set<ContainerID>> dn2ContainerMap;
/**
* Constructs a Node2ContainerMap Object.
*/
public Node2ContainerMap() {
dn2ContainerMap = new ConcurrentHashMap<>();
}
/**
* Returns true if this is a datanode that is already tracked by
* Node2ContainerMap.
*
* @param datanodeID - UUID of the Datanode.
* @return True if this is tracked, false if this map does not know about it.
*/
public boolean isKnownDatanode(UUID datanodeID) {
Preconditions.checkNotNull(datanodeID);
return dn2ContainerMap.containsKey(datanodeID);
}
/**
* Insert a new datanode into Node2Container Map.
*
* @param datanodeID - Datanode UUID.
* @param containerIDs - List of ContainerIDs.
*/
public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
throws SCMException {
Preconditions.checkNotNull(containerIDs);
Preconditions.checkNotNull(datanodeID);
if (dn2ContainerMap.putIfAbsent(datanodeID, containerIDs) != null) {
throw new SCMException("Node already exists in the map",
DUPLICATE_DATANODE);
}
}
/**
* Updates the Container list of an existing DN.
*
* @param datanodeID - UUID of DN.
* @param containers - Set of containers that are present on the DN.
* @throws SCMException - if we don't know about this datanode, for new DN
* use insertNewDatanode.
*/
public void updateDatanodeMap(UUID datanodeID, Set<ContainerID> containers)
throws SCMException {
Preconditions.checkNotNull(datanodeID);
Preconditions.checkNotNull(containers);
// Replace the stored set atomically; computeIfPresent returns null only
// when the datanode is not tracked yet.
if (dn2ContainerMap.computeIfPresent(datanodeID,
    (k, v) -> new TreeSet<>(containers)) == null) {
throw new SCMException("No such datanode", NO_SUCH_DATANODE);
}
}
/**
* Removes the datanode entry from the map.
* @param datanodeID - Datanode ID.
*/
public void removeDatanode(UUID datanodeID) {
Preconditions.checkNotNull(datanodeID);
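// Mapping the key to null atomically removes the entry; this is a no-op
// if the datanode is not present in the map.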
dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null);
}
/**
* Returns null if there are no containers associated with this datanode ID.
*
* @param datanode - UUID of the datanode.
* @return Set of containers or null.
*/
public Set<ContainerID> getContainers(UUID datanode) {
Preconditions.checkNotNull(datanode);
return dn2ContainerMap.computeIfPresent(datanode, (k, v) ->
Collections.unmodifiableSet(v));
}
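/**
* Compares the set of containers reported by a datanode against the set
* this map is tracking for it and classifies the difference.
*
* @param datanodeID - UUID of the datanode.
* @param containers - Set of ContainerIDs from the container report.
* @return ReportResult with the status and any new or missing containers.
*/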
public ReportResult processReport(UUID datanodeID,
    Set<ContainerID> containers) {
Preconditions.checkNotNull(datanodeID);
Preconditions.checkNotNull(containers);
if (!isKnownDatanode(datanodeID)) {
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.NEW_DATANODE_FOUND)
.setNewContainers(containers)
.build();
}
// Edge conditions such as empty container sets are handled by the
// removeAll calls below.
Set<ContainerID> currentSet = dn2ContainerMap.get(datanodeID);
TreeSet<ContainerID> newContainers = new TreeSet<>(containers);
newContainers.removeAll(currentSet);
TreeSet<ContainerID> missingContainers = new TreeSet<>(currentSet);
missingContainers.removeAll(containers);
if (newContainers.isEmpty() && missingContainers.isEmpty()) {
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.ALL_IS_WELL)
.build();
}
if (newContainers.isEmpty() && !missingContainers.isEmpty()) {
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.MISSING_CONTAINERS)
.setMissingContainers(missingContainers)
.build();
}
if (!newContainers.isEmpty() && missingContainers.isEmpty()) {
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.NEW_CONTAINERS_FOUND)
.setNewContainers(newContainers)
.build();
}
if (!newContainers.isEmpty() && !missingContainers.isEmpty()) {
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND)
.setNewContainers(newContainers)
.setMissingContainers(missingContainers)
.build();
}
// Default status; logically unreachable, but keeps the compiler happy.
return ReportResult.ReportResultBuilder.newBuilder()
.setStatus(ReportStatus.ALL_IS_WELL)
.build();
}
/**
* Results possible from processing a container report by
* Node2ContainerMap.
*/
public enum ReportStatus {
ALL_IS_WELL,
MISSING_CONTAINERS,
NEW_CONTAINERS_FOUND,
MISSING_AND_NEW_CONTAINERS_FOUND,
NEW_DATANODE_FOUND
}
}
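
For orientation, here is a minimal sketch of how an SCM-side handler might
drive this map when container reports arrive. The handler class itself is
hypothetical; only the Node2ContainerMap and ReportResult calls come from
this patch.

package org.apache.hadoop.hdds.scm.node.states;

import java.util.Set;
import java.util.UUID;

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;

/** Hypothetical caller illustrating the intended processReport flow. */
public class ContainerReportHandlerSketch {
  private final Node2ContainerMap node2ContainerMap = new Node2ContainerMap();

  public void onContainerReport(UUID datanodeID, Set<ContainerID> reported)
      throws SCMException {
    ReportResult result =
        node2ContainerMap.processReport(datanodeID, reported);
    switch (result.getStatus()) {
    case NEW_DATANODE_FOUND:
      // First report from this datanode; start tracking it.
      node2ContainerMap.insertNewDatanode(datanodeID, reported);
      break;
    case ALL_IS_WELL:
      break;
    default:
      // New and/or missing containers were detected; reconcile replication
      // state elsewhere, then store the latest snapshot.
      node2ContainerMap.updateDatanodeMap(datanodeID, reported);
      break;
    }
  }
}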

ReportResult.java (org.apache.hadoop.hdds.scm.node.states, new file)

@@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.hdds.scm.node.states;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import java.util.Set;
/**
* A container report gets processed by the Node2ContainerMap, which
* returns a ReportResult.
*/
public class ReportResult {
private final Node2ContainerMap.ReportStatus status;
private final Set<ContainerID> missingContainers;
private final Set<ContainerID> newContainers;
ReportResult(Node2ContainerMap.ReportStatus status,
Set<ContainerID> missingContainers,
Set<ContainerID> newContainers) {
this.status = status;
this.missingContainers = missingContainers;
this.newContainers = newContainers;
}
public Node2ContainerMap.ReportStatus getStatus() {
return status;
}
public Set<ContainerID> getMissingContainers() {
return missingContainers;
}
public Set<ContainerID> getNewContainers() {
return newContainers;
}
static class ReportResultBuilder {
private Node2ContainerMap.ReportStatus status;
private Set<ContainerID> missingContainers;
private Set<ContainerID> newContainers;
static ReportResultBuilder newBuilder() {
return new ReportResultBuilder();
}
public ReportResultBuilder setStatus(
Node2ContainerMap.ReportStatus newStatus) {
this.status = newStatus;
return this;
}
public ReportResultBuilder setMissingContainers(
Set<ContainerID> missingContainersList) {
this.missingContainers = missingContainersList;
return this;
}
public ReportResultBuilder setNewContainers(
Set<ContainerID> newContainersList) {
this.newContainers = newContainersList;
return this;
}
ReportResult build() {
return new ReportResult(status, missingContainers, newContainers);
}
}
}
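
Since ReportResultBuilder is package-private, only code in
org.apache.hadoop.hdds.scm.node.states can construct results. A small usage
sketch follows (the missingSet variable is assumed); note that unset fields
stay null, so callers should consult getStatus() before touching the sets.

// Sketch: building a ReportResult by hand, the way processReport does.
ReportResult result = ReportResult.ReportResultBuilder.newBuilder()
    .setStatus(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS)
    .setMissingContainers(missingSet)
    .build();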

package-info.java (org.apache.hadoop.hdds.scm.node.states, new file)

@@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
/**
* Node States package.
*/
package org.apache.hadoop.hdds.scm.node.states;

Node2ContainerMapTest.java (org.apache.hadoop.hdds.scm.node.states, new file)

@@ -0,0 +1,308 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.hdds.scm.node.states;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
/**
* Test classes for Node2ContainerMap.
*/
public class Node2ContainerMapTest {
private static final int DATANODE_COUNT = 300;
private static final int CONTAINER_COUNT = 1000;
private final Map<UUID, TreeSet<ContainerID>> testData = new
ConcurrentHashMap<>();
@Rule
public ExpectedException thrown = ExpectedException.none();
private void generateData() {
for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
TreeSet<ContainerID> currentSet = new TreeSet<>();
for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex;
currentSet.add(new ContainerID(currentCnIndex));
}
testData.put(UUID.randomUUID(), currentSet);
}
}
private UUID getFirstKey() {
return testData.keySet().iterator().next();
}
@Before
public void setUp() throws Exception {
generateData();
}
@After
public void tearDown() throws Exception {
}
@Test
public void testIsKnownDatanode() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
UUID knownNode = getFirstKey();
UUID unknownNode = UUID.randomUUID();
Set<ContainerID> containerIDs = testData.get(knownNode);
map.insertNewDatanode(knownNode, containerIDs);
Assert.assertTrue("Not able to detect a known node",
map.isKnownDatanode(knownNode));
Assert.assertFalse("Unknown node detected",
map.isKnownDatanode(unknownNode));
}
@Test
public void testInsertNewDatanode() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
UUID knownNode = getFirstKey();
Set<ContainerID> containerIDs = testData.get(knownNode);
map.insertNewDatanode(knownNode, containerIDs);
Set<ContainerID> readSet = map.getContainers(knownNode);
// Assert that all elements are present in the set that we read back from
// node map.
Set<ContainerID> newSet = new TreeSet<>(readSet);
Assert.assertTrue(newSet.removeAll(containerIDs));
Assert.assertEquals(0, newSet.size());
// Re-insertion after removal must succeed; verify that before arming the
// ExpectedException, since nothing runs after the expected throw.
map.removeDatanode(knownNode);
map.insertNewDatanode(knownNode, containerIDs);
thrown.expect(SCMException.class);
thrown.expectMessage("already exists");
map.insertNewDatanode(knownNode, containerIDs);
}
@Test
public void testProcessReportCheckOneNode() throws SCMException {
UUID key = getFirstKey();
Set<ContainerID> values = testData.get(key);
Node2ContainerMap map = new Node2ContainerMap();
map.insertNewDatanode(key, values);
Assert.assertTrue(map.isKnownDatanode(key));
ReportResult result = map.processReport(key, values);
Assert.assertEquals(Node2ContainerMap.ReportStatus.ALL_IS_WELL,
result.getStatus());
}
@Test
public void testProcessReportInsertAll() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
}
// Assert all Keys are known datanodes.
for (UUID key : testData.keySet()) {
Assert.assertTrue(map.isKnownDatanode(key));
}
}
/*
For ProcessReport we have to test the following scenarios.
1. New Datanode - A new datanode appears and we have to add that to the
SCM's Node2Container Map.
2. New Container - A Datanode exists, but a new container is added to that
DN. We need to detect that and return a list of added containers.
3. Missing Container - A Datanode exists, but one of the expected containers
on that datanode is missing. We need to detect that.
4. We get a container report that has both the missing and new containers.
We need to return separate lists for these.
*/
/**
* Assert that we are able to detect the addition of a new datanode.
*
* @throws SCMException
*/
@Test
public void testProcessReportDetectNewDataNode() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
// If we attempt to process a report for a node that is not in the map,
// we get a result back that says NEW_DATANODE_FOUND.
UUID key = getFirstKey();
TreeSet<ContainerID> values = testData.get(key);
ReportResult result = map.processReport(key, values);
Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
result.getStatus());
Assert.assertEquals(values.size(), result.getNewContainers().size());
}
/**
* This test asserts that processReport is able to detect new containers
* when they are added to a datanode. For that we populate the DN with a
* list of containerIDs, then add a few more containers and make sure that
* we are able to detect them.
*
* @throws SCMException
*/
@Test
public void testProcessReportDetectNewContainers() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
UUID key = getFirstKey();
TreeSet<ContainerID> values = testData.get(key);
map.insertNewDatanode(key, values);
final int newCount = 100;
// This is not a mistake; the TreeSet of ContainerIDs appears to be
// reverse sorted, so pollFirst() returns the highest container ID.
ContainerID last = values.pollFirst();
TreeSet<ContainerID> addedContainers = new TreeSet<>();
for (int x = 1; x <= newCount; x++) {
long cTemp = last.getId() + x;
addedContainers.add(new ContainerID(cTemp));
}
// This set is the super set of existing containers and new containers.
TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
newContainersSet.addAll(addedContainers);
ReportResult result = map.processReport(key, newContainersSet);
// Assert that the number of new containers found matches addedContainers.
Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
result.getStatus());
Assert.assertEquals(addedContainers.size(),
result.getNewContainers().size());
// Assert that the reported new containers are exactly the ones we added.
Assert.assertTrue("All objects are not removed.",
result.getNewContainers().removeAll(addedContainers));
}
/**
* This test asserts that processReport is able to detect missing containers
* if they are missing from the reported list.
*
* @throws SCMException
*/
@Test
public void testProcessReportDetectMissingContainers() throws SCMException {
Node2ContainerMap map = new Node2ContainerMap();
UUID key = getFirstKey();
TreeSet<ContainerID> values = testData.get(key);
map.insertNewDatanode(key, values);
final int removeCount = 100;
Random r = new Random();
ContainerID first = values.pollLast();
TreeSet<ContainerID> removedContainers = new TreeSet<>();
// Pick random containers to remove; duplicate picks simply collapse in
// the TreeSet, which is fine.
for (int x = 0; x < removeCount; x++) {
int startBase = (int) first.getId();
// Offset by at least one so we never regenerate the ID that was polled
// off the set above and is no longer tracked.
long cTemp = r.nextInt(values.size()) + 1;
removedContainers.add(new ContainerID(cTemp + startBase));
}
// This set is a new set with some containers removed.
TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
newContainersSet.removeAll(removedContainers);
ReportResult result = map.processReport(key, newContainersSet);
// Assert that the number of missing containers matches removedContainers.
Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
result.getStatus());
Assert.assertEquals(removedContainers.size(),
result.getMissingContainers().size());
// Assert that the missing containers are exactly the ones we removed.
Assert.assertTrue("All missing containers not found.",
result.getMissingContainers().removeAll(removedContainers));
}
@Test
public void testProcessReportDetectNewAndMissingContainers() throws
SCMException {
Node2ContainerMap map = new Node2ContainerMap();
UUID key = getFirstKey();
TreeSet<ContainerID> values = testData.get(key);
map.insertNewDatanode(key, values);
Set<ContainerID> insertedSet = new TreeSet<>();
// Insert container IDs 1..30, which are new to this datanode.
for (int x = 1; x <= 30; x++) {
insertedSet.add(new ContainerID(x));
}
final int removeCount = 100;
Random r = new Random();
ContainerID first = values.pollLast();
TreeSet<ContainerID> removedContainers = new TreeSet<>();
// Pick random containers to remove; duplicate picks simply collapse in
// the TreeSet, which is fine.
for (int x = 0; x < removeCount; x++) {
int startBase = (int) first.getId();
// Offset by at least one so we never regenerate the polled-off ID.
long cTemp = r.nextInt(values.size()) + 1;
removedContainers.add(new ContainerID(cTemp + startBase));
}
Set<ContainerID> newSet = new TreeSet<>(values);
newSet.addAll(insertedSet);
newSet.removeAll(removedContainers);
ReportResult result = map.processReport(key, newSet);
Assert.assertEquals(
Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
result.getStatus());
Assert.assertEquals(removedContainers.size(),
result.getMissingContainers().size());
// Assert that the missing containers are exactly the ones we removed.
Assert.assertTrue("All missing containers not found.",
result.getMissingContainers().removeAll(removedContainers));
Assert.assertEquals(insertedSet.size(),
result.getNewContainers().size());
// Assert that the new containers are exactly the ones we inserted.
Assert.assertTrue("All inserted containers are not found.",
result.getNewContainers().removeAll(insertedSet));
}
}

package-info.java (test sources, org.apache.hadoop.hdds.scm.node.states, new file)

@@ -0,0 +1,23 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* Test Node2Container Map.
*/
package org.apache.hadoop.hdds.scm.node.states;

GenesisMemoryProfiler.java (org.apache.hadoop.ozone.genesis)

@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.genesis;
+import org.apache.hadoop.conf.StorageUnit;
import org.openjdk.jmh.infra.BenchmarkParams;
import org.openjdk.jmh.infra.IterationParams;
import org.openjdk.jmh.profile.InternalProfiler;
@@ -46,7 +47,8 @@ public Collection<? extends Result> afterIteration(BenchmarkParams
long totalHeap = Runtime.getRuntime().totalMemory();
Collection<ScalarResult> samples = new ArrayList<>();
samples.add(new ScalarResult("Max heap", totalHeap, "bytes",
samples.add(new ScalarResult("Max heap",
StorageUnit.BYTES.toGBs(totalHeap), "GBs",
AggregationPolicy.MAX));
return samples;
}
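
The change above reports heap in gigabytes rather than raw bytes via
org.apache.hadoop.conf.StorageUnit. A standalone sketch of the conversion,
assuming StorageUnit.BYTES.toGBs behaves as its name suggests:

import org.apache.hadoop.conf.StorageUnit;

public class HeapReportSketch {
  public static void main(String[] args) {
    long totalHeap = Runtime.getRuntime().totalMemory();
    // Convert the raw byte count into gigabytes for the profiler sample,
    // e.g. 4294967296 bytes -> 4.0 GBs.
    System.out.println("Max heap: " + StorageUnit.BYTES.toGBs(totalHeap)
        + " GBs");
  }
}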