HDDS-1663. Add datanode to network topology cluster during node regis… (#937)

commit 1732312f45 (parent cf84881dea)
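The change in brief, as reflected in the diff below: StorageContainerManager now creates a NetworkTopologyImpl cluster map before wiring up the node manager and exposes it through getClusterMap(). During datanode registration, SCMNodeManager sets the node's network name (hostname or IP, depending on dfs.datanode.use.datanode.hostname), resolves its network location through the configured DNSToSwitchMapping (TableMapping by default), and adds the node to the topology. Node and NodeImpl gain setters for network name and location, NodeManager gains a getNode(address) lookup, and the commit adds table-mapping test resources plus an ozone-net-topology docker-compose environment to exercise rack-aware registration.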
Node.java:

@@ -32,11 +32,23 @@ public interface Node {
    * exclude itself. In another words, its parent's full network location */
   String getNetworkLocation();
 
+  /**
+   * Set this node's network location.
+   * @param location it's network location
+   */
+  void setNetworkLocation(String location);
+
   /** @return this node's self name in network topology. This should be node's
    * IP or hostname.
    * */
   String getNetworkName();
 
+  /**
+   * Set this node's name, can be hostname or Ipaddress.
+   * @param name it's network name
+   */
+  void setNetworkName(String name);
+
   /** @return this node's full path in network topology. It's the concatenation
    * of location and name.
    * */
NodeImpl.java:

@@ -27,11 +27,11 @@
  */
 public class NodeImpl implements Node {
   // host:port#
-  private final String name;
+  private String name;
   // string representation of this node's location, such as /dc1/rack1
-  private final String location;
+  private String location;
   // location + "/" + name
-  private final String path;
+  private String path;
   // which level of the tree the node resides, start from 1 for root
   private int level;
   // node's parent

@@ -53,10 +53,7 @@ public NodeImpl(String name, String location, int cost) {
     }
     this.name = (name == null) ? ROOT : name;
     this.location = NetUtils.normalize(location);
-    this.path = this.location.equals(PATH_SEPARATOR_STR) ?
-        this.location + this.name :
-        this.location + PATH_SEPARATOR_STR + this.name;
+    this.path = getPath();
     this.cost = cost;
   }
 
@@ -84,6 +81,15 @@ public String getNetworkName() {
     return name;
   }
 
+  /**
+   * Set this node's name, can be hostname or Ipaddress.
+   * @param networkName it's network name
+   */
+  public void setNetworkName(String networkName) {
+    this.name = networkName;
+    this.path = getPath();
+  }
+
   /**
    * @return this node's network location
    */
@@ -91,6 +97,16 @@ public String getNetworkLocation() {
     return location;
   }
 
+  /**
+   * Set this node's network location.
+   * @param networkLocation it's network location
+   */
+  @Override
+  public void setNetworkLocation(String networkLocation) {
+    this.location = networkLocation;
+    this.path = getPath();
+  }
+
   /**
    * @return this node's full path in network topology. It's the concatenation
    * of location and name.
@@ -197,4 +213,10 @@ public int hashCode() {
   public String toString() {
     return getNetworkFullPath();
   }
+
+  private String getPath() {
+    return this.location.equals(PATH_SEPARATOR_STR) ?
+        this.location + this.name :
+        this.location + PATH_SEPARATOR_STR + this.name;
+  }
 }
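The topology fields lose their final modifier because a datanode's network name and location are now assigned after construction, at registration time; both new setters recompute the cached path so the full network path stays consistent. A minimal sketch of the intended behavior (note that, unlike the constructor, the setter does not run NetUtils.normalize on its argument):

```java
// Sketch only: exercises the setters added to NodeImpl in this diff.
NodeImpl node = new NodeImpl("1.2.3.4", "/default-rack", 0);
node.setNetworkLocation("/rack1");   // also refreshes the cached path
// toString() returns getNetworkFullPath(), i.e. location + "/" + name:
assert "/rack1/1.2.3.4".equals(node.toString());
```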
pom.xml (hadoop-hdds/server-scm):

@@ -145,6 +145,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <testResource>
         <directory>${basedir}/../../hadoop-hdds/common/src/main/resources</directory>
       </testResource>
+      <testResource>
+        <directory>${basedir}/src/test/resources</directory>
+      </testResource>
     </testResources>
   </build>
 </project>
NodeManager.java:

@@ -171,4 +171,13 @@ void processNodeReport(DatanodeDetails datanodeDetails,
    */
   // TODO: We can give better name to this method!
   List<SCMCommand> getCommandQueue(UUID dnID);
+
+  /**
+   * Given datanode host address, returns the DatanodeDetails for the
+   * node.
+   *
+   * @param address node host address
+   * @return the given datanode, or null if not found
+   */
+  DatanodeDetails getNode(String address);
 }
SCMNodeManager.java:

@@ -23,6 +23,9 @@
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.net.NetConstants;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.Node;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;

@@ -44,14 +47,19 @@
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.CachedDNSToSwitchMapping;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.TableMapping;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
+import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -93,6 +101,9 @@ public class SCMNodeManager implements NodeManager {
   // Node manager MXBean
   private ObjectName nmInfoBean;
   private final StorageContainerManager scmManager;
+  private final NetworkTopology clusterMap;
+  private final DNSToSwitchMapping dnsToSwitchMapping;
+  private final boolean useHostname;
 
   /**
    * Constructs SCM machine Manager.

@@ -108,6 +119,18 @@ public SCMNodeManager(OzoneConfiguration conf, String clusterID,
     LOG.info("Entering startup safe mode.");
     registerMXBean();
     this.metrics = SCMNodeMetrics.create(this);
+    this.clusterMap = scmManager.getClusterMap();
+    Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
+        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+            TableMapping.class, DNSToSwitchMapping.class);
+    DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
+        dnsToSwitchMappingClass, conf);
+    this.dnsToSwitchMapping =
+        ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
+            : new CachedDNSToSwitchMapping(newInstance));
+    this.useHostname = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
   }
 
   private void registerMXBean() {
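The constructor reuses Hadoop's standard rack-awareness plumbing: the mapping class comes from net.topology.node.switch.mapping.impl (defaulting to TableMapping) and is wrapped in a CachedDNSToSwitchMapping unless it already caches. A minimal configuration that exercises this path might look like the sketch below; the table file path is a placeholder, while the keys are the ones used by the test and docker-config later in this commit:

```java
// Illustrative configuration only; the file path is a placeholder.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set("net.topology.node.switch.mapping.impl",
    "org.apache.hadoop.net.TableMapping");
conf.set("net.topology.table.file.name", "/etc/hadoop/network-config");
// Optionally resolve datanodes by hostname instead of IP:
conf.setBoolean("dfs.datanode.use.datanode.hostname", true);
```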
@@ -228,7 +251,19 @@ public RegisteredCommand register(
       datanodeDetails.setIpAddress(dnAddress.getHostAddress());
     }
     try {
+      String location;
+      if (useHostname) {
+        datanodeDetails.setNetworkName(datanodeDetails.getHostName());
+        location = nodeResolve(datanodeDetails.getHostName());
+      } else {
+        datanodeDetails.setNetworkName(datanodeDetails.getIpAddress());
+        location = nodeResolve(datanodeDetails.getIpAddress());
+      }
+      if (location != null) {
+        datanodeDetails.setNetworkLocation(location);
+      }
       nodeStateManager.addNode(datanodeDetails);
+      clusterMap.add(datanodeDetails);
       // Updating Node Report, as registration is successful
       processNodeReport(datanodeDetails, nodeReport);
       LOG.info("Registered Data node : {}", datanodeDetails);
@@ -236,6 +271,7 @@ public RegisteredCommand register(
       LOG.trace("Datanode is already registered. Datanode: {}",
           datanodeDetails.toString());
     }
+
     return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
         .setDatanodeUUID(datanodeDetails.getUuidString())
         .setClusterID(this.clusterID)
@@ -515,5 +551,36 @@ public List<SCMCommand> getCommandQueue(UUID dnID) {
     return commandQueue.getCommand(dnID);
   }
 
+  /**
+   * Given datanode address or host name, returns the DatanodeDetails for the
+   * node.
+   *
+   * @param address node host address
+   * @return the given datanode, or null if not found
+   */
+  @Override
+  public DatanodeDetails getNode(String address) {
+    Node node = null;
+    String location = nodeResolve(address);
+    if (location != null) {
+      node = clusterMap.getNode(location + NetConstants.PATH_SEPARATOR_STR +
+          address);
+    }
+    return node == null ? null : (DatanodeDetails)node;
+  }
 
+  private String nodeResolve(String hostname) {
+    List<String> hosts = new ArrayList<>(1);
+    hosts.add(hostname);
+    List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
+    if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
+      String location = resolvedHosts.get(0);
+      LOG.debug("Resolve datanode {} return location {}", hostname, location);
+      return location;
+    } else {
+      LOG.error("Node {} Resolution failed. Please make sure that DNS table " +
+          "mapping or configured mapping is functional.", hostname);
+      return null;
+    }
+  }
 }
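nodeResolve() pushes a single host through the DNSToSwitchMapping, and getNode() then fetches the node from the topology by its full path, so the address passed in must match the network name the datanode registered with (its IP, unless dfs.datanode.use.datanode.hostname is set). A hypothetical lookup; the address literal is illustrative only:

```java
// Hypothetical use of the new NodeManager.getNode(); "10.5.0.4" matches one
// of the static datanode addresses in the compose file later in this commit.
DatanodeDetails dn = nodeManager.getNode("10.5.0.4");
if (dn != null) {
  System.out.println(dn.getNetworkFullPath());  // e.g. "/rack1/10.5.0.4"
}
```

A related ordering detail shows up in StorageContainerManager below: the NetworkTopologyImpl construction moves ahead of the node-manager wiring because SCMNodeManager's constructor now calls scmManager.getClusterMap().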
StorageContainerManager.java:

@@ -372,6 +372,8 @@ public StorageContainerManager(OzoneConfiguration conf,
   private void initializeSystemManagers(OzoneConfiguration conf,
       SCMConfigurator configurator)
       throws IOException {
+    clusterMap = new NetworkTopologyImpl(conf);
+
     if(configurator.getScmNodeManager() != null) {
       scmNodeManager = configurator.getScmNodeManager();
     } else {

@@ -379,7 +381,6 @@ private void initializeSystemManagers(OzoneConfiguration conf,
         conf, scmStorageConfig.getClusterID(), this, eventQueue);
     }
 
-    clusterMap = new NetworkTopologyImpl(conf);
     ContainerPlacementPolicy containerPlacementPolicy =
         ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager,
         clusterMap, true);

@@ -1067,4 +1068,12 @@ public Map<String, Integer> getContainerStateCount() {
   public SCMMetadataStore getScmMetadataStore() {
     return scmMetadataStore;
   }
+
+  /**
+   * Returns the SCM network topology cluster.
+   * @return NetworkTopology
+   */
+  public NetworkTopology getClusterMap() {
+    return this.clusterMap;
+  }
 }
TestUtils.java:

@@ -151,7 +151,7 @@ public static DatanodeDetails getDatanodeDetails(
    *
    * @return DatanodeDetails
    */
-  private static DatanodeDetails createDatanodeDetails(String uuid,
+  public static DatanodeDetails createDatanodeDetails(String uuid,
       String hostname, String ipAddress, String networkLocation) {
     DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
         DatanodeDetails.Port.Name.STANDALONE, 0);
MockNodeManager.java (test):

@@ -451,6 +451,11 @@ public List<SCMCommand> getCommandQueue(UUID dnID) {
     return null;
   }
 
+  @Override
+  public DatanodeDetails getNode(String address) {
+    return null;
+  }
+
   /**
    * A class to declare some values for the nodes so that our tests
    * won't fail.
TestSCMNodeManager.java:

@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;

@@ -31,6 +32,7 @@
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;

@@ -56,6 +58,10 @@
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
+    .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
+    .NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_DEADNODE_INTERVAL;
@@ -945,4 +951,110 @@ public void testHandlingSCMCommandEvent()
     }
   }
 
+  /**
+   * Test add node into network topology during node register. Datanode
+   * uses Ip address to resolve network location.
+   */
+  @Test
+  public void testScmRegisterNodeWithIpAddress()
+      throws IOException, InterruptedException, AuthenticationException {
+    testScmRegisterNodeWithNetworkTopology(false);
+  }
+
+  /**
+   * Test add node into network topology during node register. Datanode
+   * uses hostname to resolve network location.
+   */
+  @Test
+  public void testScmRegisterNodeWithHostname()
+      throws IOException, InterruptedException, AuthenticationException {
+    testScmRegisterNodeWithNetworkTopology(true);
+  }
+
+  /**
+   * Test add node into a 4-layer network topology during node register.
+   */
+  @Test
+  public void testScmRegisterNodeWith4LayerNetworkTopology()
+      throws IOException, InterruptedException, AuthenticationException {
+    OzoneConfiguration conf = getConf();
+    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
+        MILLISECONDS);
+
+    // create table mapping file
+    String[] hostNames = {"host1", "host2", "host3", "host4"};
+    String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
+    String mapFile = this.getClass().getClassLoader()
+        .getResource("nodegroup-mapping").getPath();
+
+    // create and register nodes
+    conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        "org.apache.hadoop.net.TableMapping");
+    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
+    conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE,
+        "network-topology-nodegroup.xml");
+    final int nodeCount = hostNames.length;
+    // use default IP address to resolve node
+    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
+      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
+      for (int i = 0; i < nodeCount; i++) {
+        DatanodeDetails node = TestUtils.createDatanodeDetails(
+            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+        nodeManager.register(node, null, null);
+        nodes[i] = node;
+      }
+
+      // verify network topology cluster has all the registered nodes
+      Thread.sleep(4 * 1000);
+      NetworkTopology clusterMap = scm.getClusterMap();
+      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
+      assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
+      assertEquals(4, clusterMap.getMaxLevel());
+      List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
+      nodeList.stream().forEach(node ->
+          Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng")));
+    }
+  }
+
+  private void testScmRegisterNodeWithNetworkTopology(boolean useHostname)
+      throws IOException, InterruptedException, AuthenticationException {
+    OzoneConfiguration conf = getConf();
+    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000,
+        MILLISECONDS);
+
+    // create table mapping file
+    String[] hostNames = {"host1", "host2", "host3", "host4"};
+    String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"};
+    String mapFile = this.getClass().getClassLoader()
+        .getResource("rack-mapping").getPath();
+
+    // create and register nodes
+    conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        "org.apache.hadoop.net.TableMapping");
+    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile);
+    if (useHostname) {
+      conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true");
+    }
+    final int nodeCount = hostNames.length;
+    // use default IP address to resolve node
+    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
+      DatanodeDetails[] nodes = new DatanodeDetails[nodeCount];
+      for (int i = 0; i < nodeCount; i++) {
+        DatanodeDetails node = TestUtils.createDatanodeDetails(
+            UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
+        nodeManager.register(node, null, null);
+        nodes[i] = node;
+      }
+
+      // verify network topology cluster has all the registered nodes
+      Thread.sleep(4 * 1000);
+      NetworkTopology clusterMap = scm.getClusterMap();
+      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
+      assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
+      assertEquals(3, clusterMap.getMaxLevel());
+      List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
+      nodeList.stream().forEach(node ->
+          Assert.assertTrue(node.getNetworkLocation().equals("/rack1")));
+    }
+  }
 }
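For orientation: NodeImpl numbers tree levels starting at 1 for the root, so the default /rack/datanode layout asserted above yields getMaxLevel() == 3, while the nodegroup schema (network-topology-nodegroup.xml) inserts an extra layer, /rack/nodegroup/datanode, and yields 4.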
ReplicationNodeManagerMock.java (test):

@@ -309,4 +309,9 @@ public void onMessage(CommandForDatanode commandForDatanode,
   public List<SCMCommand> getCommandQueue(UUID dnID) {
     return null;
   }
+
+  @Override
+  public DatanodeDetails getNode(String address) {
+    return null;
+  }
 }
hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping (new file):

@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+host1 /rack1/ng1
+host2 /rack1/ng1
+host3 /rack1/ng2
+host4 /rack1/ng2
+1.2.3.4 /rack1/ng1
+2.3.4.5 /rack1/ng1
+3.4.5.6 /rack1/ng2
+4.5.6.7 /rack1/ng2
hadoop-hdds/server-scm/src/test/resources/rack-mapping (new file):

@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+host1 /rack1
+host2 /rack1
+host3 /rack1
+host4 /rack1
+1.2.3.4 /rack1
+2.3.4.5 /rack1
+3.4.5.6 /rack1
+4.5.6.7 /rack1
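Both mapping files deliberately list every host twice, once by hostname and once by IP address: TableMapping does a plain table lookup on whatever string SCM resolves with, so the same file serves both the IP-based and the hostname-based (dfs.datanode.use.datanode.hostname=true) test paths. The format is one whitespace-separated host/location pair per line.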
hadoop-ozone/dist/src/main/compose/ozone-net-topology/.env (new file):

@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HDDS_VERSION=0.5.0-SNAPSHOT
hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-compose.yaml (new file):

@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+  datanode_1:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864
+      - 9882
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - ./docker-config
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.4
+  datanode_2:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864
+      - 9882
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - ./docker-config
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.5
+  datanode_3:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864
+      - 9882
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - ./docker-config
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.6
+  datanode_4:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9864
+      - 9882
+    command: ["/opt/hadoop/bin/ozone","datanode"]
+    env_file:
+      - ./docker-config
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.7
+  om:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9874:9874
+    environment:
+      ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+    env_file:
+      - ./docker-config
+    command: ["/opt/hadoop/bin/ozone","om"]
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.70
+  scm:
+    image: apache/hadoop-runner:jdk11
+    privileged: true #required by the profiler
+    volumes:
+      - ../..:/opt/hadoop
+    ports:
+      - 9876:9876
+    env_file:
+      - ./docker-config
+    environment:
+      ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+    command: ["/opt/hadoop/bin/ozone","scm"]
+    networks:
+      service_network:
+        ipv4_address: 10.5.0.71
+networks:
+  service_network:
+    driver: bridge
+    ipam:
+      config:
+        - subnet: 10.5.0.0/16
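Each container is pinned to a static address in the 10.5.0.0/16 subnet so that lookups against network-config (below) are deterministic: datanodes 10.5.0.4 through 10.5.0.6 map to /rack1 and 10.5.0.7 to /rack2.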
hadoop-ozone/dist/src/main/compose/ozone-net-topology/docker-config (new file):

@@ -0,0 +1,88 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.om.address=om
+OZONE-SITE.XML_ozone.om.http-address=om:9874
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_ozone.replication=1
+OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
+OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping
+HDFS-SITE.XML_net.topology.table.file.name=/opt/hadoop/compose/ozone-net-topology/network-config
+ASYNC_PROFILER_HOME=/opt/profiler
+LOG4J.PROPERTIES_log4j.rootLogger=DEBUG, ARF
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
+LOG4J.PROPERTIES_log4j.appender.ARF=org.apache.log4j.RollingFileAppender
+LOG4J.PROPERTIES_log4j.appender.ARF.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.ARF.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+LOG4J.PROPERTIES_log4j.appender.ARF.file=/opt/hadoop/logs/${module.name}-${user.name}.log
+HDDS_DN_OPTS=-Dmodule.name=datanode
+HDFS_OM_OPTS=-Dmodule.name=om
+HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm
+
+#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
hadoop-ozone/dist/src/main/compose/ozone-net-topology/network-config (new file):

@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+10.5.0.4 /rack1
+10.5.0.5 /rack1
+10.5.0.6 /rack1
+10.5.0.7 /rack2
+10.5.0.8 /rack2
+10.5.0.9 /rack2
hadoop-ozone/dist/src/main/compose/ozone-net-topology/test.sh (new executable file):

@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+# shellcheck source=/dev/null
+source "$COMPOSE_DIR/../testlib.sh"
+
+start_docker_env
+
+#Due to the limitation of the current auditparser test, it should be the
+#first test in a clean cluster.
+
+execute_robot_test om auditparser
+
+execute_robot_test scm basic/basic.robot
+
+stop_docker_env
+
+generate_report